Compare commits


No commits in common. "master" and "v0.3.0" have entirely different histories.

729 changed files with 8984 additions and 65061 deletions


@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# clang-format configuration file. Intended for clang-format >= 12.
# clang-format configuration file. Intended for clang-format >= 7.
#
# For more information, see:
#
@@ -75,7 +75,6 @@ IncludeCategories:
Priority: 9
# Qt includes (match before C++ standard library)
- Regex: '<Q([A-Za-z0-9\-_])+>'
CaseSensitive: true
Priority: 9
# Headers in <> with an extension. (+system libraries)
- Regex: '<([A-Za-z0-9\-_])+\.h>'


@@ -1,29 +0,0 @@
# SPDX-License-Identifier: CC0-1.0
root = true
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
[*.{cpp,h}]
indent_size = 8
indent_style = tab
[*.json]
indent_size = 4
indent_style = space
[*.py]
indent_size = 4
indent_style = space
[*.yaml]
indent_size = 2
indent_style = space
[{meson.build,meson_options.txt}]
indent_size = 4
indent_style = space

.gitignore

@@ -6,4 +6,3 @@
*.patch
*.pyc
__pycache__/
venv/


@@ -1,33 +0,0 @@
# SPDX-License-Identifier: CC-BY-SA-4.0
@INCLUDE_PATH = @TOP_BUILDDIR@/Documentation
@INCLUDE = Doxyfile-common
HIDE_UNDOC_CLASSES = NO
HIDE_UNDOC_MEMBERS = NO
HTML_OUTPUT = internal-api-html
INTERNAL_DOCS = YES
ENABLED_SECTIONS = internal
INPUT = "@TOP_SRCDIR@/Documentation" \
"@TOP_SRCDIR@/include/libcamera" \
"@TOP_SRCDIR@/src/ipa/ipu3" \
"@TOP_SRCDIR@/src/ipa/libipa" \
"@TOP_SRCDIR@/src/libcamera" \
"@TOP_BUILDDIR@/include/libcamera" \
"@TOP_BUILDDIR@/src/libcamera"
EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
@TOP_SRCDIR@/include/libcamera/internal/device_enumerator_sysfs.h \
@TOP_SRCDIR@/include/libcamera/internal/device_enumerator_udev.h \
@TOP_SRCDIR@/include/libcamera/internal/ipc_pipe_unixsocket.h \
@TOP_SRCDIR@/src/libcamera/device_enumerator_sysfs.cpp \
@TOP_SRCDIR@/src/libcamera/device_enumerator_udev.cpp \
@TOP_SRCDIR@/src/libcamera/ipc_pipe_unixsocket.cpp \
@TOP_SRCDIR@/src/libcamera/pipeline/ \
@TOP_SRCDIR@/src/libcamera/sensor/camera_sensor_legacy.cpp \
@TOP_SRCDIR@/src/libcamera/sensor/camera_sensor_raw.cpp \
@TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
@TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
@TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
@TOP_BUILDDIR@/src/libcamera/proxy/


@@ -1,20 +0,0 @@
# SPDX-License-Identifier: CC-BY-SA-4.0
@INCLUDE_PATH = @TOP_BUILDDIR@/Documentation
@INCLUDE = Doxyfile-common
HIDE_UNDOC_CLASSES = YES
HIDE_UNDOC_MEMBERS = YES
HTML_OUTPUT = api-html
INTERNAL_DOCS = NO
INPUT = "@TOP_SRCDIR@/Documentation" \
${inputs}
EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/class.h \
@TOP_SRCDIR@/include/libcamera/base/object.h \
@TOP_SRCDIR@/include/libcamera/base/span.h \
@TOP_SRCDIR@/src/libcamera/base/class.cpp \
@TOP_SRCDIR@/src/libcamera/base/object.cpp
PREDEFINED += __DOXYGEN_PUBLIC__


@@ -22,17 +22,35 @@ CASE_SENSE_NAMES = YES
QUIET = YES
WARN_AS_ERROR = @WARN_AS_ERROR@
INPUT = "@TOP_SRCDIR@/include/libcamera" \
"@TOP_SRCDIR@/src/ipa/ipu3" \
"@TOP_SRCDIR@/src/ipa/libipa" \
"@TOP_SRCDIR@/src/libcamera" \
"@TOP_BUILDDIR@/include/libcamera" \
"@TOP_BUILDDIR@/src/libcamera"
FILE_PATTERNS = *.c \
*.cpp \
*.dox \
*.h
RECURSIVE = YES
EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
@TOP_SRCDIR@/include/libcamera/internal/device_enumerator_sysfs.h \
@TOP_SRCDIR@/include/libcamera/internal/device_enumerator_udev.h \
@TOP_SRCDIR@/include/libcamera/internal/ipc_pipe_unixsocket.h \
@TOP_SRCDIR@/src/libcamera/device_enumerator_sysfs.cpp \
@TOP_SRCDIR@/src/libcamera/device_enumerator_udev.cpp \
@TOP_SRCDIR@/src/libcamera/ipc_pipe_unixsocket.cpp \
@TOP_SRCDIR@/src/libcamera/pipeline/ \
@TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
@TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
@TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
@TOP_BUILDDIR@/src/libcamera/proxy/
EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \
@TOP_BUILDDIR@/include/libcamera/ipa/*_proxy.h \
@TOP_BUILDDIR@/include/libcamera/ipa/ipu3_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/mali-c55_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/raspberrypi_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/rkisp1_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/vimc_*.h
@@ -52,13 +70,14 @@ EXCLUDE_SYMBOLS = libcamera::BoundMethodArgs \
EXCLUDE_SYMLINKS = YES
HTML_OUTPUT = api-html
GENERATE_LATEX = NO
MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = YES
INCLUDE_PATH = "@TOP_BUILDDIR@/include" \
"@TOP_SRCDIR@/include"
INCLUDE_PATH = "@TOP_SRCDIR@/include/libcamera"
INCLUDE_FILE_PATTERNS = *.h
IMAGE_PATH = "@TOP_SRCDIR@/Documentation/images"


@@ -2,7 +2,7 @@
.. _api:
API Reference
=============
API
===
:: Placeholder for Doxygen documentation


@@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _camera-sensor-model:
.. todo: Move to Doxygen-generated documentation


@@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-4.0
.. include:: documentation-contents.rst
.. _code-of-conduct:
Contributor Covenant Code of Conduct


@@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _coding-style-guidelines:
Coding Style Guidelines


@@ -37,11 +37,8 @@ author = u'Kieran Bingham, Jacopo Mondi, Laurent Pinchart, Niklas Söderlund'
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.graphviz'
]
graphviz_output_format = 'svg'
# Add any paths that contain templates here, relative to this directory.
templates_path = []
@@ -64,12 +61,7 @@ language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'_build',
'Thumbs.db',
'.DS_Store',
'documentation-contents.rst',
]
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None


@@ -1,331 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
Design of Exposure and Gain controls
====================================
This document explains the design and rationale of the controls related to
exposure and gain. This includes the all-encompassing auto-exposure (AE), the
manual exposure control, and the manual gain control.
Description of the problem
--------------------------
Sub controls
^^^^^^^^^^^^
More than one control makes up the total exposure: exposure time,
gain, and aperture (though for now we will not consider aperture). We already
had individual controls for setting the values of manual exposure and manual
gain, but for switching between auto mode and manual mode we only had a
high-level boolean AeEnable control that would set *both* exposure and gain to
auto mode or manual mode; we had no way to set one to auto and the other to
manual.
So, we need to introduce two new controls to act as "levers" to indicate
individually for exposure and gain if the value would come from AEGC or if it
would come from the manual control value.
Aperture priority
^^^^^^^^^^^^^^^^^
We may eventually need to support aperture, and so whatever our solution is for
having only some controls on auto and the others on manual needs to be
extensible.
Flickering when going from auto to manual
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When a manual exposure or gain value is requested by the application, it costs
a few frames worth of time for them to take effect. This means that during a
transition from auto to manual, there would be flickering in the control values
and the transition won't be smooth.
Take for instance the following flow, where we start on auto exposure (which
for the purposes of the example increments by 1 each frame) and we want to
switch seamlessly to manual exposure, which involves copying the exposure value
computed by the auto exposure algorithm:
::
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
| N | | N+1 | | N+2 | | N+3 | | N+4 | | N+5 | | N+6 |
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
Mode requested: Auto Auto Auto Manual Manual Manual Manual
Exp requested: N/A N/A N/A 2 2 2 2
Set in Frame: N+2 N+3 N+4 N+5 N+6 N+7 N+8
Mode used: Auto Auto Auto Auto Auto Manual Manual
Exp used: 0 1 2 3 4 2 2
As we can see, after frame N+2 completes, we copy the exposure value that was
used for frame N+2 (which was computed by AE algorithm), and queue that value
into request N+3 with manual mode on. However, as it takes two frames for the
exposure to be set, the exposure still changes since it is set by AE, and we
get a flicker in the exposure during the switch from auto to manual.
A solution is to *not submit* any exposure value when manual mode is enabled,
and wait until the manual mode has been "applied" before copying the exposure
value:
::
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
| N | | N+1 | | N+2 | | N+3 | | N+4 | | N+5 | | N+6 |
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
Mode requested: Auto Auto Auto Manual Manual Manual Manual
Exp requested: N/A N/A N/A None None None 5
Set in Frame: N+2 N+3 N+4 N+5 N+6 N+7 N+8
Mode used: Auto Auto Auto Auto Auto Manual Manual
Exp used: 0 1 2 3 4 5 5
In practice, this works. However, libcamera has a policy where once a control
is submitted, its value is saved and does not need to be resubmitted. If the
manual exposure value was set while auto mode was on, in theory the value would
be saved, so when manual mode is enabled, the exposure value that was
previously set would immediately be used. Clearly this solution isn't correct,
but it can serve as the basis for a proper solution, with some more rigorous
rules.
Existing solutions
------------------
Raspberry Pi
^^^^^^^^^^^^
The Raspberry Pi IPA gets around the lack of individual AeEnable controls for
exposure and gain by using magic values. When AeEnable is false, if one of the
manual control values was set to 0 then the value computed by AEGC would be
used for just that control. This solution isn't desirable, as it prevents
that magic value from being used as a valid value.
To get around the flickering issue, when AeEnable is false, the Raspberry Pi
AEGC simply stops updating the values to be set, without restoring the
previously set manual exposure time and gain. This works, but is not a proper
solution.
Android
^^^^^^^
The Android HAL specification requires that exposure and gain (sensitivity)
must both be manual or both be auto. It cannot be that one is manual while the
other is auto, so they simply don't support sub controls.
For the flickering issue, the Android HAL has an AeLock control. To transition
from auto to manual, the application would keep AE on auto, and turn on the
lock. Once the lock has propagated through, then the value can be copied from
the result into the request and the lock disabled and the mode set to manual.
The problem with this solution is, besides the extra complexity, that it is
ambiguous what happens if there is a state transition from manual to locked
(even though it's a state transition that doesn't make sense). If locked is
defined to "use the last automatically computed values" then it could use the
values from the last time AE was set to auto, or it would be undefined if AE
was never auto (e.g. it started out as manual), or if AE is implemented to run
in the background it could just use the current values that are computed. If
locked is defined to "use the last value that was set" there would be less
ambiguity. Still, it's better if we can make it impossible to execute this
nonsensical state transition, and if we can reduce the complexity of having
this extra control or extra setting on a lever.
Summary of goals
----------------
- We need a lock of some sort, to instruct the AEGC to not update output
results
- We need manual modes, to override the values computed by the AEGC
- We need to support seamless transitions from auto to manual, and do so
without flickering
- We need custom minimum values for the manual controls; that is, no magic
values for enabling/disabling auto
- All of these need to be done with AE sub-controls (exposure time, analogue
gain) and be extensible to aperture in the future
Our solution
------------
A diagram of our solution:
::
+----------------------------+-------------+------------------+-----------------+
| INPUT | ALGORITHM | RESULT | OUTPUT |
+----------------------------+-------------+------------------+-----------------+
ExposureTimeMode ExposureTimeMode
---------------------+----------------------------------------+----------------->
0: Auto | |
1: Manual | V
| |\
| | \
| /----------------------------------> | 1| ExposureTime
| | +-------------+ exposure time | | -------------->
\--)--> | | --------------> | 0|
ExposureTime | | | | /
------------------------+--> | | |/
| | AeState
| AEGC | ----------------------------------->
AnalogueGain | |
------------------------+--> | | |\
| | | | \
/--)--> | | --------------> | 0| AnalogueGain
| | +-------------+ analogue gain | | -------------->
| \----------------------------------> | 1|
| | /
| |/
| ^
AnalogueGainMode | | AnalogueGainMode
---------------------+----------------------------------------+----------------->
0: Auto
1: Manual
AeEnable
- True -> ExposureTimeMode:Auto + AnalogueGainMode:Auto
- False -> ExposureTimeMode:Manual + AnalogueGainMode:Manual
The diagram is divided in four sections horizontally:
- Input: The values received from the request controls
- Algorithm: The algorithm itself
- Result: The values calculated by the algorithm
- Output: The values reported in result metadata and applied to the device
The four input controls are divided between manual values (ExposureTime and
AnalogueGain), and operation modes (ExposureTimeMode and AnalogueGainMode). The
former are the manual values, the latter control how they're applied. The two
modes are independent from each other, and each can take one of two values:
- Auto (0): The AGC computes the value normally. The AGC result is applied
to the output. The manual value is ignored *and is not retained*.
- Manual (1): The AGC uses the manual value internally. The corresponding
manual control from the request is applied to the output. The AGC result
is ignored.
The AeState control reports the state of the unified AEGC block. If both
ExposureTimeMode and AnalogueGainMode are set to manual then it will report
Idle. If at least one of the two is set to auto, then AeState will report
if the AEGC has Converged or not (Searching). This control replaces the old
AeLocked control, as it was insufficient for reporting the AE state.
There is a caveat to manual mode: the manual control value is not retained if
it is set during auto mode. This means that if manual mode is entered without
also setting the manual value, then it will enter a state similar to "locked",
where the last automatically computed value while the mode was auto will be
used. Once the manual value is set, then that will be used and retained as
usual.
This simulates an auto -> locked -> manual or auto -> manual state transition,
and makes it impossible to do the nonsensical manual -> locked state
transition.
AeEnable still exists to allow applications to set the mode of all the
sub-controls at once. Besides being for convenience, this will also be useful
when we eventually implement an aperture control. This is because applications
written before aperture support becomes available would still be able to set
the aperture mode to auto or manual, as opposed to having the aperture stuck
at auto while the application really wanted manual. Although the aperture would
still be stuck at an uncontrollable value, at least it would be at a static
usable value as opposed to varying via the AEGC algorithm.
With this solution, the earlier example would become:
::
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
| N+2 | | N+3 | | N+4 | | N+5 | | N+6 | | N+7 | | N+8 | | N+9 | | N+10|
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
Mode requested: Auto Manual Manual Manual Manual Manual Manual Manual Manual
Exp requested: N/A None None None None 10 None 10 10
Set in Frame: N+4 N+5 N+6 N+7 N+8 N+9 N+10 N+11 N+12
Mode used: Auto Auto Auto Manual Manual Manual Manual Manual Manual
Exp used: 2 3 4 5 5 5 5 10 10
This example is extended by a few frames to exhibit the simulated "locked"
state. At frame N+5 the application has confirmed that the manual mode has been
entered, but does not provide a manual value until request N+7. Thus, the value
that is used in requests N+5 and N+6 (where the mode is manual but no manual
value has been supplied) comes from
the last value that was used when the mode was auto, which comes from frame
N+4.
Then, in N+7, a manual value of 10 is supplied. It takes until frame N+9 for
the exposure to be applied. N+8 does not supply a manual value, but the last
supplied value is retained, so a manual value of 10 is still used and set in
frame N+10.
Although this behavior is the same as what we had with waiting for the manual
mode to propagate (in the section "Description of the problem"), this time it
is correct as we have defined specifically that if a manual value was specified
while the mode was auto, it will not be retained.
Description of the controls
---------------------------
As described above, libcamera offers the following controls related to exposure
and gain:
- AnalogueGain
- AnalogueGainMode
- ExposureTime
- ExposureTimeMode
- AeState
- AeEnable
Auto-exposure and auto-gain can be enabled and disabled separately using the
ExposureTimeMode and AnalogueGainMode controls respectively. The AeEnable
control can also be used, as it sets both of the modes simultaneously. The
AeEnable control is not returned in metadata.
When the respective mode is set to auto, the respective value that is computed
by the AEGC algorithm is applied to the image sensor. Any value that is
supplied in the manual ExposureTime/AnalogueGain control is ignored and not
retained. Another way to understand this is that when the mode transitions from
auto to manual, the internally stored control value is overwritten with the
last value computed by the auto algorithm.
This means that when we transition from auto to manual without supplying a
manual control value, the last value that was set by the AEGC algorithm will
keep being used. This allows a flickerless transition from auto to
manual as described earlier. If the camera started out in manual mode and no
corresponding value has been supplied yet, then a best-effort default value
shall be set.
The manual control value can be set in the same request as setting the mode to
auto if the desired manual control value is already known.
Transitioning from manual to auto shall be implicitly flickerless, as the AEGC
algorithms are expected to start running from the last manual value.
The AeState metadata reports the state of the AE algorithm. As AE cannot
compute exposure and gain separately, the state of the AE component is
unified. There are three states: Idle, Searching, and Converged.
The state shall be Idle if both ExposureTimeMode and AnalogueGainMode
are set to Manual. If the camera only supports one of the two controls,
then the state shall be Idle if that one control is set to Manual. If
the camera does not support Manual for at least one of the two controls,
then the state will never be Idle, as AE will always be running.
The state shall be Searching if at least one of exposure or gain calculated
by the AE algorithm is used (that is, at least one of the two modes is Auto),
*and* the value(s) have not converged yet.
The state shall be Converged if at least one of exposure or gain calculated
by the AE algorithm is used (that is, at least one of the two modes is Auto),
*and* the value(s) have converged.
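As an illustration, the following sketch performs the flickerless
auto-to-manual transition described above. It assumes the controls defined in
this document are exposed as ``libcamera::controls::ExposureTimeMode`` and
related identifiers, and that ``camera`` and ``request`` are a valid camera and
request; the surrounding request handling is elided.

.. code:: cpp

   using namespace libcamera;

   /* Switch to manual mode without supplying a value, so that the last
    * auto-computed exposure time is retained and no flicker occurs. */
   request->controls().set(controls::ExposureTimeMode,
                           controls::ExposureTimeModeManual);
   camera->queueRequest(request);

   /*
    * Once the metadata of a completed request reports that the manual
    * mode is in effect, a manual exposure time (in microseconds) can be
    * supplied and will be retained as usual.
    */
   request->controls().set(controls::ExposureTime, 10000);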

Documentation/docs.rst

@@ -0,0 +1,400 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. contents::
:local:
*************
Documentation
*************
.. toctree::
:hidden:
API <api-html/index>
API
===
The libcamera API is extensively documented using Doxygen. The :ref:`API
nightly build <api>` contains the most up-to-date API documentation, built from
the latest master branch.
Feature Requirements
====================
Device enumeration
------------------
The library shall support enumerating all camera devices available in the
system, including both fixed cameras and hotpluggable cameras. It shall
support cameras plugged and unplugged after the initialization of the
library, and shall offer a mechanism to notify applications of camera plug
and unplug.
The following types of cameras shall be supported:
* Internal cameras designed for point-and-shoot still image and video
capture usage, either controlled directly by the CPU, or exposed through
an internal USB bus as a UVC device.
* External UVC cameras designed for video conferencing usage.
Other types of camera, including analog cameras, depth cameras, thermal
cameras, external digital picture or movie cameras, are out of scope for
this project.
A hardware device that includes independent camera sensors, such as front
and back sensors in a phone, shall be considered as multiple camera devices
for the purpose of this library.
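A sketch of the plug and unplug notification mechanism, assuming the
``cameraAdded`` and ``cameraRemoved`` signals exposed by the ``CameraManager``
public API:

.. code:: cpp

   #include <iostream>

   #include <libcamera/libcamera.h>

   static void cameraAdded(std::shared_ptr<libcamera::Camera> camera)
   {
           std::cout << "Camera plugged: " << camera->id() << std::endl;
   }

   static void cameraRemoved(std::shared_ptr<libcamera::Camera> camera)
   {
           std::cout << "Camera unplugged: " << camera->id() << std::endl;
   }

   /* Cameras already connected at startup are reported through
    * CameraManager::cameras() instead of the hotplug signals. */
   void watchHotplug(libcamera::CameraManager *cm)
   {
           cm->cameraAdded.connect(cameraAdded);
           cm->cameraRemoved.connect(cameraRemoved);
   }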
Independent Camera Devices
--------------------------
When multiple cameras are present in the system and are able to operate
independently from each other, the library shall expose them as multiple
camera devices and support parallel operation without any additional usage
restriction apart from the limitations inherent to the hardware (such as
memory bandwidth, CPU usage or number of CSI-2 receivers for instance).
Independent processes shall be able to use independent camera devices
without interfering with each other. A single camera device shall be
usable by a single process at a time.
Multiple streams support
------------------------
The library shall support multiple video streams running in parallel
for each camera device, within the limits imposed by the system.
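For instance, an application could request two parallel streams by generating
a camera configuration for multiple stream roles. A sketch, assuming the
camera supports both roles and ``camera`` is an acquired camera:

.. code:: cpp

   /* Ask the camera for a configuration covering two stream roles. */
   std::unique_ptr<libcamera::CameraConfiguration> config =
           camera->generateConfiguration({ libcamera::StreamRole::Viewfinder,
                                           libcamera::StreamRole::VideoRecording });
   if (config && config->validate() != libcamera::CameraConfiguration::Invalid)
           camera->configure(config.get());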
Per frame controls
------------------
The library shall support controlling capture parameters for each stream
on a per-frame basis, on a best effort basis based on the capabilities of the
hardware and underlying software stack (including kernel drivers and
firmware). It shall apply capture parameters to the frame they target, and
report the value of the parameters that have effectively been used for each
captured frame.
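In terms of the public API, per-frame control maps to setting controls on the
request for a given frame and reading back the applied values from its
metadata. A sketch, with ``Brightness`` used purely as an example control and
``request`` an allocated request:

.. code:: cpp

   /* Queue a control value targeting this request's frame only. */
   request->controls().set(libcamera::controls::Brightness, 0.5f);
   camera->queueRequest(request);

   /* In the requestCompleted handler, read the values actually used. */
   const libcamera::ControlList &metadata = request->metadata();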
When a camera device supports multiple streams, the library shall allow both
control of each stream independently, and control of multiple streams
together. Streams that are controlled together shall be synchronized. No
synchronization is required for streams controlled independently.
Capability Enumeration
----------------------
The library shall expose capabilities of each camera device in a way that
allows applications to discover those capabilities dynamically. Applications
shall be allowed to cache capabilities for as long as they are using the
library. If capabilities can change at runtime, the library shall offer a
mechanism to notify applications of such changes. Applications shall not
cache capabilities in long term storage between runs.
Capabilities shall be discovered dynamically at runtime from the device when
possible, and may come, in part or in full, from platform configuration
data.
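Such discovery maps onto the camera's control and property maps in the public
API. A minimal sketch, assuming a valid ``camera`` and ``<iostream>``:

.. code:: cpp

   /* Enumerate the controls a camera supports, with their limits. */
   for (const auto &[id, info] : camera->controls())
           std::cout << id->name() << ": " << info.toString() << std::endl;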
Device Profiles
---------------
The library may define different camera device profiles, each with a minimum
set of required capabilities. Applications may use those profiles to quickly
determine the level of features exposed by a device without parsing the full
list of capabilities. Camera devices may implement additional capabilities
on top of the minimum required set for the profile they expose.
3A and Image Enhancement Algorithms
-----------------------------------
The camera devices shall implement auto exposure, auto gain and auto white
balance. Camera devices that include a focus lens shall implement auto
focus. Additional image enhancement algorithms, such as noise reduction or
video stabilization, may be implemented.
All algorithms may be implemented in hardware or firmware outside of the
library, or in software in the library. They shall all be controllable by
applications.
The library shall be architectured to isolate the 3A and image enhancement
algorithms in a component with a documented API, respectively called the 3A
component and the 3A API. The 3A API shall be stable, and shall allow both
open-source and closed-source implementations of the 3A component.
The library may include statically-linked open-source 3A components, and
shall support dynamically-linked open-source and closed-source 3A
components.
Closed-source 3A Component Sandboxing
-------------------------------------
For security purposes, it may be desired to run closed-source 3A components
in a separate process. The 3A API would in such a case be transported over
IPC. The 3A API shall make it possible to use any IPC mechanism that
supports passing file descriptors.
The library may implement an IPC mechanism, and shall support third-party
platform-specific IPC mechanisms through the implementation of a
platform-specific 3A API wrapper. No modification to the library shall be
needed to use such third-party IPC mechanisms.
The 3A component shall not directly access any device node on the system.
Such accesses shall instead be performed through the 3A API. The library
shall validate all accesses and restrict them to what is absolutely required
by 3A components.
V4L2 Compatibility Layer
------------------------
The project shall support traditional V4L2 applications through an additional
libcamera wrapper library. The wrapper library shall trap all accesses to
camera devices through `LD_PRELOAD`, and route them through libcamera to
emulate a high-level V4L2 camera device. It shall expose camera device
features on a best-effort basis, and aim for the level of features
traditionally available from a UVC camera designed for video conferencing.
Android Camera HAL v3 Compatibility
-----------------------------------
The library API shall expose all the features required to implement an
Android Camera HAL v3 on top of libcamera. Some features of the HAL may be
omitted as long as they can be implemented separately in the HAL, such as
JPEG encoding, or YUV reprocessing.
Camera Stack
============
::
a c / +-------------+ +-------------+ +-------------+ +-------------+
p a | | Native | | Framework | | Native | | Android |
p t | | V4L2 | | Application | | libcamera | | Camera |
l i | | Application | | (gstreamer) | | Application | | Framework |
i o \ +-------------+ +-------------+ +-------------+ +-------------+
n ^ ^ ^ ^
| | | |
l a | | | |
i d v v | v
b a / +-------------+ +-------------+ | +-------------+
c p | | V4L2 | | Camera | | | Android |
a t | | Compat. | | Framework | | | Camera |
m a | | | | (gstreamer) | | | HAL |
e t \ +-------------+ +-------------+ | +-------------+
r i ^ ^ | ^
a o | | | |
n | | | |
/ | ,................................................
| | ! : Language : !
l f | | ! : Bindings : !
i r | | ! : (optional) : !
b a | | \...............................................'
c m | | | | |
a e | | | | |
m w | v v v v
e o | +----------------------------------------------------------------+
r r | | |
a k | | libcamera |
| | |
\ +----------------------------------------------------------------+
^ ^ ^
Userspace | | |
------------------------ | ---------------- | ---------------- | ---------------
Kernel | | |
v v v
+-----------+ +-----------+ +-----------+
| Media | <--> | Video | <--> | V4L2 |
| Device | | Device | | Subdev |
+-----------+ +-----------+ +-----------+
The camera stack comprises four software layers. From bottom to top:
* The kernel drivers control the camera hardware and expose a
low-level interface to userspace through the Linux kernel V4L2
family of APIs (Media Controller API, V4L2 Video Device API and
V4L2 Subdev API).
* The libcamera framework is the core part of the stack. It
handles all control of the camera devices in its core component,
libcamera, and exposes a native C++ API to upper layers. Optional
language bindings allow interfacing to libcamera from other
programming languages.
Those components live in the same source code repository and
all together constitute the libcamera framework.
* The libcamera adaptation is an umbrella term designating the
components that interface to libcamera in other frameworks.
Notable examples are a V4L2 compatibility layer, a gstreamer
libcamera element, and an Android camera HAL implementation based
on libcamera.
Those components can live in the libcamera project source code
in separate repositories, or move to their respective project's
repository (for instance the gstreamer libcamera element).
* The applications and upper level frameworks are based on the
libcamera framework or libcamera adaptation, and are outside of
the scope of the libcamera project.
libcamera Architecture
======================
::
---------------------------< libcamera Public API >---------------------------
^ ^
| |
v v
+-------------+ +-------------------------------------------------+
| Camera | | Camera Device |
| Devices | | +---------------------------------------------+ |
| Manager | | | Device-Agnostic | |
+-------------+ | | | |
^ | | +------------------------+ |
| | | | ~~~~~~~~~~~~~~~~~~~~~ |
| | | | { +---------------+ } |
| | | | } | ////Image//// | { |
| | | | <-> | /Processing// | } |
| | | | } | /Algorithms// | { |
| | | | { +---------------+ } |
| | | | ~~~~~~~~~~~~~~~~~~~~~ |
| | | | ======================== |
| | | | +---------------+ |
| | | | | //Pipeline/// | |
| | | | <-> | ///Handler/// | |
| | | | | ///////////// | |
| | +--------------------+ +---------------+ |
| | Device-Specific |
| +-------------------------------------------------+
| ^ ^
| | |
v v v
+--------------------------------------------------------------------+
| Helpers and Support Classes |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
| | Support | | Allocator | | IPC | | Manager | |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| +-------------+ +-------------+ |
| | Pipeline | | ... | |
| | Runner | | | |
| +-------------+ +-------------+ |
+--------------------------------------------------------------------+
/// Device-Specific Components
~~~ Sandboxing
While offering a unified API towards upper layers, and presenting
itself as a single library, libcamera isn't monolithic. It exposes
multiple components through its public API, is built around a set of
separate helpers internally, uses device-specific components and can
load dynamic plugins.
Camera Devices Manager
The Camera Devices Manager provides a view of available cameras
in the system. It performs cold enumeration and runtime camera
management, and supports a hotplug notification mechanism in its
public API.
To avoid the cost associated with cold enumeration of all devices
at application start, and to arbitrate concurrent access to camera
devices, the Camera Devices Manager could later be split to a
separate service, possibly with integration in platform-specific
device management.
Camera Device
The Camera Device represents a camera device to upper layers. It
exposes full control of the device through the public API, and is
thus the highest level object exposed by libcamera.
Camera Device instances are created by the Camera Devices
Manager. An optional function to create new instances could be exposed
through the public API to speed up initialization when the upper
layer knows how to directly address camera devices present in the
system.
Pipeline Handler
The Pipeline Handler manages complex pipelines exposed by the kernel drivers
through the Media Controller and V4L2 APIs. It abstracts pipeline handling to
hide device-specific details to the rest of the library, and implements both
pipeline configuration based on stream configuration, and pipeline runtime
execution and scheduling when needed by the device.
This component is device-specific and is part of the libcamera code base. As
such it is covered by the same free software license as the rest of libcamera
and needs to be contributed upstream by device vendors. The Pipeline Handler
lives in the same process as the rest of the library, and has access to all
helpers and kernel camera-related devices.
Image Processing Algorithms
Together with the hardware image processing and hardware statistics
collection, the Image Processing Algorithms implement 3A (Auto-Exposure,
Auto-White Balance and Auto-Focus) and other algorithms. They run on the CPU
and interact with the kernel camera devices to control hardware image
processing based on the parameters supplied by upper layers, closing the
control loop of the ISP.
This component is device-specific and is loaded as an external plugin. It can
be part of the libcamera code base, in which case it is covered by the same
license, or provided externally as an open-source or closed-source component.
The component is sandboxed and can only interact with libcamera through
internal APIs specifically marked as such. In particular it will have no
direct access to kernel camera devices, and all its accesses to image and
metadata will be mediated by dmabuf instances explicitly passed to the
component. The component must be prepared to run in a process separate from
the main libcamera process, and to have a very restricted view of the system,
including no access to networking APIs and limited access to file systems.
The sandboxing mechanism isn't defined by libcamera. One example
implementation will be provided as part of the project, and platforms vendors
will be able to provide their own sandboxing mechanism as a plugin.
libcamera should provide a basic implementation of Image Processing
Algorithms, to serve as a reference for the internal API. Device vendors are
expected to provide a full-fledged implementation compatible with their
Pipeline Handler. One goal of the libcamera project is to create an
environment in which the community will be able to compete with the
closed-source vendor binaries and develop a high quality open source
implementation.
Helpers and Support Classes
While Pipeline Handlers are device-specific, implementations are expected to
share code due to usage of identical APIs towards the kernel camera drivers
and the Image Processing Algorithms. This includes without limitation handling
of the MC and V4L2 APIs, buffer management through dmabuf, and pipeline
discovery, configuration and scheduling. Such code will be factored out to
helpers when applicable.
Other parts of libcamera will also benefit from factoring code out to
self-contained support classes, even if such code is present only once in the
code base, in order to keep the source code clean and easy to read. This
should be the case for instance for plugin management.
V4L2 Compatibility Layer
------------------------
V4L2 compatibility is achieved through a shared library that traps all
accesses to camera devices and routes them to libcamera to emulate high-level
V4L2 camera devices. It is injected in a process address space through
`LD_PRELOAD` and is completely transparent for applications.
The compatibility layer exposes camera device features on a best-effort basis,
and aims for the level of features traditionally available from a UVC camera
designed for video conferencing.
Android Camera HAL
------------------
Camera support for Android is achieved through a generic Android
camera HAL implementation on top of libcamera. The HAL will internally implement
features required by Android and missing from libcamera, such as JPEG encoding
support.
The Android camera HAL implementation will initially target the
LIMITED hardware level, with support for the FULL level then being gradually
implemented.


@@ -1,35 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. container:: documentation-nav
* **Documentation for Users**
* :doc:`Introduction </introduction>`
* :doc:`/feature_requirements`
* :doc:`/guides/application-developer`
* :doc:`/python-bindings`
* :doc:`/environment_variables`
* :doc:`/api-html/index`
* :doc:`/code-of-conduct`
* |
* **Documentation for Developers**
* :doc:`/libcamera_architecture`
* :doc:`/guides/pipeline-handler`
* :doc:`/guides/ipa`
* :doc:`/camera-sensor-model`
* :doc:`/guides/tracing`
* :doc:`/software-isp-benchmarking`
* :doc:`/coding-style`
* :doc:`/internal-api-html/index`
* |
* **Documentation for System Integrators**
* :doc:`/lens_driver_requirements`
* :doc:`/sensor_driver_requirements`
..
The following directive adds the "documentation" class to all of the pages
generated by sphinx. This is not relevant in libcamera nor addressed in the
theme's CSS, since all of the pages here are documentation. It **is** used
to properly format the documentation pages on libcamera.org and so should not
be removed.
.. rst-class:: documentation


@@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
Environment variables
=====================
@@ -39,11 +37,6 @@ LIBCAMERA_IPA_MODULE_PATH
Example value: ``${HOME}/.libcamera/lib:/opt/libcamera/vendor/lib``
LIBCAMERA_IPA_PROXY_PATH
Define custom full path for a proxy worker for a given executable name.
Example value: ``${HOME}/.libcamera/proxy/worker:/opt/libcamera/vendor/proxy/worker``
LIBCAMERA_PIPELINES_MATCH_LIST
Define an ordered list of pipeline names to be used to match the media
devices in the system. The pipeline handler names used to populate the
@@ -57,11 +50,6 @@ LIBCAMERA_RPI_CONFIG_FILE
Example value: ``/usr/local/share/libcamera/pipeline/rpi/vc4/minimal_mem.yaml``
LIBCAMERA_<NAME>_TUNING_FILE
Define a custom IPA tuning file to use with the pipeline handler `NAME`.
Example value: ``/usr/local/share/libcamera/ipa/rpi/vc4/custom_sensor.json``
Further details
---------------


@@ -1,150 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
Feature Requirements
====================
Device enumeration
------------------
The library shall support enumerating all camera devices available in the
system, including both fixed cameras and hotpluggable cameras. It shall
support cameras plugged and unplugged after the initialization of the
library, and shall offer a mechanism to notify applications of camera plug
and unplug.
The following types of cameras shall be supported:
* Internal cameras designed for point-and-shoot still image and video
capture usage, either controlled directly by the CPU, or exposed through
an internal USB bus as a UVC device.
* External UVC cameras designed for video conferencing usage.
Other types of camera, including analog cameras, depth cameras, thermal
cameras, external digital picture or movie cameras, are out of scope for
this project.
A hardware device that includes independent camera sensors, such as front
and back sensors in a phone, shall be considered as multiple camera devices
for the purpose of this library.
Independent Camera Devices
--------------------------
When multiple cameras are present in the system and are able to operate
independently from each other, the library shall expose them as multiple
camera devices and support parallel operation without any additional usage
restriction apart from the limitations inherent to the hardware (such as
memory bandwidth, CPU usage or number of CSI-2 receivers for instance).
Independent processes shall be able to use independent camera devices
without interfering with each other. A single camera device shall be
usable by a single process at a time.
Multiple streams support
------------------------
The library shall support multiple video streams running in parallel
for each camera device, within the limits imposed by the system.
Per frame controls
------------------
The library shall support controlling capture parameters for each stream
on a per-frame basis, on a best effort basis based on the capabilities of the
hardware and underlying software stack (including kernel drivers and
firmware). It shall apply capture parameters to the frame they target, and
report the value of the parameters that have effectively been used for each
captured frame.
When a camera device supports multiple streams, the library shall allow both
control of each stream independently, and control of multiple streams
together. Streams that are controlled together shall be synchronized. No
synchronization is required for streams controlled independently.
Capability Enumeration
----------------------
The library shall expose capabilities of each camera device in a way that
allows applications to discover those capabilities dynamically. Applications
shall be allowed to cache capabilities for as long as they are using the
library. If capabilities can change at runtime, the library shall offer a
mechanism to notify applications of such changes. Applications shall not
cache capabilities in long term storage between runs.
Capabilities shall be discovered dynamically at runtime from the device when
possible, and may come, in part or in full, from platform configuration
data.
Device Profiles
---------------
The library may define different camera device profiles, each with a minimum
set of required capabilities. Applications may use those profiles to quickly
determine the level of features exposed by a device without parsing the full
list of capabilities. Camera devices may implement additional capabilities
on top of the minimum required set for the profile they expose.
3A and Image Enhancement Algorithms
-----------------------------------
The library shall provide a basic implementation of Image Processing Algorithms
to serve as a reference for the internal API. This shall include auto exposure,
auto gain and auto white balance. Camera devices that include a focus lens shall
implement auto focus. Additional image enhancement algorithms, such as noise
reduction or video stabilization, may be implemented. Device vendors are
expected to provide a fully-fledged implementation compatible with their
Pipeline Handler. One goal of the libcamera project is to create an environment
in which the community will be able to compete with the closed-source vendor
binaries and develop a high quality open source implementation.
All algorithms may be implemented in hardware or firmware outside of the
library, or in software in the library. They shall all be controllable by
applications.
The library shall be architectured to isolate the 3A and image enhancement
algorithms in a component with a documented API, respectively called the 3A
component and the 3A API. The 3A API shall be stable, and shall allow both
open-source and closed-source implementations of the 3A component.
The library may include statically-linked open-source 3A components, and
shall support dynamically-linked open-source and closed-source 3A
components.
Closed-source 3A Component Sandboxing
-------------------------------------
For security purposes, it may be desired to run closed-source 3A components
in a separate process. The 3A API would in such a case be transported over
IPC. The 3A API shall make it possible to use any IPC mechanism that
supports passing file descriptors.
The library may implement an IPC mechanism, and shall support third-party
platform-specific IPC mechanisms through the implementation of a
platform-specific 3A API wrapper. No modification to the library shall be
needed to use such third-party IPC mechanisms.
The 3A component shall not directly access any device node on the system.
Such accesses shall instead be performed through the 3A API. The library
shall validate all accesses and restrict them to what is absolutely required
by 3A components.
V4L2 Compatibility Layer
------------------------
The project shall support traditional V4L2 applications through an additional
libcamera wrapper library. The wrapper library shall trap all accesses to
camera devices through `LD_PRELOAD`, and route them through libcamera to
emulate a high-level V4L2 camera device. It shall expose camera device
features on a best-effort basis, and aim for the level of features
traditionally available from a UVC camera designed for video conferencing.
Android Camera HAL v3 Compatibility
-----------------------------------
The library API shall expose all the features required to implement an
Android Camera HAL v3 on top of libcamera. Some features of the HAL may be
omitted as long as they can be implemented separately in the HAL, such as
JPEG encoding, or YUV reprocessing.


@@ -1,46 +0,0 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2024, Google Inc.
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
# Generate Doxyfile from a template
import argparse
import os
import string
import sys
def fill_template(template, data):
    template = open(template, 'rb').read()
    template = template.decode('utf-8')
    template = string.Template(template)
    return template.substitute(data)


def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', dest='output', metavar='file',
                        type=argparse.FileType('w', encoding='utf-8'),
                        default=sys.stdout,
                        help='Output file name (default: standard output)')
    parser.add_argument('template', metavar='doxyfile.tmpl', type=str,
                        help='Doxyfile template')
    parser.add_argument('inputs', type=str, nargs='*',
                        help='Input files')
    args = parser.parse_args(argv[1:])

    inputs = [f'"{os.path.realpath(input)}"' for input in args.inputs]
    data = fill_template(args.template, {'inputs': (' \\\n' + ' ' * 25).join(inputs)})
    args.output.write(data)

    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))


@@ -1,5 +1,4 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. Getting started information is defined in the project README file.
.. include:: ../README.rst
:start-after: .. section-begin-getting-started


@@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: ../documentation-contents.rst
Using libcamera in a C++ application
====================================
@@ -118,21 +116,19 @@ available.
.. code:: cpp
auto cameras = cm->cameras();
if (cameras.empty()) {
if (cm->cameras().empty()) {
std::cout << "No cameras were identified on the system."
<< std::endl;
cm->stop();
return EXIT_FAILURE;
}
std::string cameraId = cameras[0]->id();
std::string cameraId = cm->cameras()[0]->id();
camera = cm->get(cameraId);
/*
* Note that `camera` may not compare equal to `cameras[0]`.
* In fact, it might simply be a `nullptr`, as the particular
* device might have disappeared (and reappeared) in the meantime.
* Note that this is equivalent to:
* camera = cm->cameras()[0];
*/
Once a camera has been selected an application needs to acquire an exclusive
@@ -483,7 +479,7 @@ instance. An example of how to write image data to disk is available in the
`FileSink class`_ which is a part of the ``cam`` utility application in the
libcamera repository.
.. _FileSink class: https://git.libcamera.org/libcamera/libcamera.git/tree/src/apps/cam/file_sink.cpp
.. _FileSink class: https://git.libcamera.org/libcamera/libcamera.git/tree/src/cam/file_sink.cpp
With the handling of this request completed, it is possible to re-use the
request and the associated buffers and re-queue it to the camera
@@ -618,7 +614,7 @@ accordingly. In this example, the application file has been named
simple_cam = executable('simple-cam',
'simple-cam.cpp',
dependencies: dependency('libcamera'))
dependencies: dependency('libcamera', required : true))
The ``dependencies`` line instructs meson to ask ``pkgconfig`` (or ``cmake``) to
locate the ``libcamera`` library, which the test application will be


@@ -0,0 +1,319 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
Developers guide to libcamera
=============================
The Linux kernel handles multimedia devices through the 'Linux media' subsystem
and provides a set of APIs (application programming interfaces) known
collectively as V4L2 (`Video for Linux 2`_) and the `Media Controller`_ API
which provide an interface to interact and control media devices.
Included in this subsystem are drivers for camera sensors, CSI2 (Camera
Serial Interface) receivers, and ISPs (Image Signal Processors)
The usage of these drivers to provide a functioning camera stack is a
responsibility that lies in userspace which is commonly implemented separately
by vendors without a common architecture or API for application developers.
libcamera provides a complete camera stack for Linux based systems to abstract
functionality desired by camera application developers and process the
configuration of hardware and image control algorithms required to obtain
desirable results from the camera.
.. _Video for Linux 2: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/v4l/v4l2.html
.. _Media Controller: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/mediactl/media-controller.html
In this developers guide, we will explore the `Camera Stack`_ and how it can
be visualised at a high level, and explore the internal `Architecture`_ of
the libcamera library with its components. The current `Platform Support`_ is
detailed, as well as an overview of the `Licensing`_ requirements of the
project.
This introduction is followed by walkthrough tutorials: newcomers wishing to
support a new platform can follow the `Pipeline Handler Writers Guide`_, while
those looking to make use of the libcamera native API will find a tutorial of
the key APIs exposed by libcamera in the `Application Writers Guide`_.
.. _Pipeline Handler Writers Guide: pipeline-handler.html
.. _Application Writers Guide: application-developer.html
.. TODO: Correctly link to the other articles of the guide
Camera Stack
------------
The libcamera library is implemented in userspace, and makes use of underlying
kernel drivers that directly interact with hardware.
Applications can make use of libcamera through the native `libcamera API`_ or
through an adaptation layer integrating libcamera into a larger framework.
.. _libcamera API: https://www.libcamera.org/api-html/index.html
::
Application Layer
/ +--------------+ +--------------+ +--------------+ +--------------+
| | Native | | Framework | | Native | | Android |
| | V4L2 | | Application | | libcamera | | Camera |
| | Application | | (gstreamer) | | Application | | Framework |
\ +--------------+ +--------------+ +--------------+ +--------------+
^ ^ ^ ^
| | | |
| | | |
v v | v
Adaptation Layer |
/ +--------------+ +--------------+ | +--------------+
| | V4L2 | | gstreamer | | | Android |
| | Compatibility| | element | | | Camera |
| | (preload) | |(libcamerasrc)| | | HAL |
\ +--------------+ +--------------+ | +--------------+
|
^ ^ | ^
| | | |
| | | |
v v v v
libcamera Framework
/ +--------------------------------------------------------------------+
| | |
| | libcamera |
| | |
\ +--------------------------------------------------------------------+
^ ^ ^
Userspace | | |
--------------------- | ---------------- | ---------------- | ---------------
Kernel | | |
v v v
+-----------+ +-----------+ +-----------+
| Media | <--> | Video | <--> | V4L2 |
| Device | | Device | | Subdev |
+-----------+ +-----------+ +-----------+
The camera stack comprises four software layers. From bottom to top:
* The kernel drivers control the camera hardware and expose a low-level
interface to userspace through the Linux kernel V4L2 family of APIs
(Media Controller API, V4L2 Video Device API and V4L2 Subdev API).
* The libcamera framework is the core part of the stack. It handles all control
of the camera devices in its core component, libcamera, and exposes a native
C++ API to upper layers.
* The libcamera adaptation layer is an umbrella term designating the components
that interface to libcamera in other frameworks. Notable examples are the V4L2
compatibility layer, the gstreamer libcamera element, and the Android camera
HAL implementation based on libcamera, which are provided as part of the
libcamera project.
* The applications and upper level frameworks are based on the libcamera
framework or libcamera adaptation, and are outside of the scope of the
libcamera project, however example native applications (cam, qcam) are
provided for testing.
V4L2 Compatibility Layer
V4L2 compatibility is achieved through a shared library that traps all
accesses to camera devices and routes them to libcamera to emulate high-level
V4L2 camera devices. It is injected in a process address space through
``LD_PRELOAD`` and is completely transparent for applications.
The compatibility layer exposes camera device features on a best-effort basis,
and aims for the level of features traditionally available from a UVC camera
designed for video conferencing.
Android Camera HAL
Camera support for Android is achieved through a generic Android camera HAL
implementation on top of libcamera. The HAL implements features required by
Android and out of scope from libcamera, such as JPEG encoding support.
This component is used to provide support for ChromeOS platforms.
GStreamer element (gstlibcamerasrc)
A `GStreamer element`_ is provided to allow capture from libcamera supported
devices through GStreamer pipelines, and connect to other elements for further
processing.
Development of this element is ongoing and is limited to a single stream.
Native libcamera API
Applications can make use of the libcamera API directly using the C++
API. An example application and walkthrough using the libcamera API can be
followed in the `Application Writers Guide`_.
.. _GStreamer element: https://gstreamer.freedesktop.org/documentation/application-development/basics/elements.html
Architecture
------------
While offering a unified API towards upper layers, and presenting itself as a
single library, libcamera isn't monolithic. It exposes multiple components
through its public API and is built around a set of separate helpers internally.
Hardware abstractions are handled through the use of device-specific components
where required and dynamically loadable plugins are used to separate image
processing algorithms from the core libcamera codebase.
::
--------------------------< libcamera Public API >---------------------------
^ ^
| |
v v
+-------------+ +---------------------------------------------------+
| Camera | | Camera Device |
| Manager | | +-----------------------------------------------+ |
+-------------+ | | Device-Agnostic | |
^ | | | |
| | | +--------------------------+ |
| | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
| | | | { +-----------------+ } |
| | | | } | //// Image //// | { |
| | | | <-> | / Processing // | } |
| | | | } | / Algorithms // | { |
| | | | { +-----------------+ } |
| | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
| | | | ========================== |
| | | | +-----------------+ |
| | | | | // Pipeline /// | |
| | | | <-> | /// Handler /// | |
| | | | | /////////////// | |
| | +--------------------+ +-----------------+ |
| | Device-Specific |
| +---------------------------------------------------+
| ^ ^
| | |
v v v
+--------------------------------------------------------------------+
| Helpers and Support Classes |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
| | Support | | Allocator | | IPC | | Manager | |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| +-------------+ +-------------+ |
| | Pipeline | | ... | |
| | Runner | | | |
| +-------------+ +-------------+ |
+--------------------------------------------------------------------+
/// Device-Specific Components
~~~ Sandboxing
Camera Manager
The Camera Manager enumerates cameras and instantiates Pipeline Handlers to
manage each Camera that libcamera supports. The Camera Manager supports
hotplug detection and notification events when supported by the underlying
kernel devices.
There is only ever one instance of the Camera Manager running per application.
Each application's instance of the Camera Manager ensures that only a single
application can take control of a camera device at once.
Read the `Camera Manager API`_ documentation for more details.
.. _Camera Manager API: https://libcamera.org/api-html/classlibcamera_1_1CameraManager.html
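A minimal sketch of starting the manager and listing the cameras it found,
using only the public API described above:

.. code:: cpp

   #include <iostream>
   #include <memory>

   #include <libcamera/libcamera.h>

   int main()
   {
           /* Start the Camera Manager and list the enumerated cameras. */
           auto cm = std::make_unique<libcamera::CameraManager>();
           cm->start();

           for (const auto &camera : cm->cameras())
                   std::cout << camera->id() << std::endl;

           cm->stop();
           return 0;
   }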
Camera Device
The Camera class represents a single item of camera hardware that is capable
of producing one or more image streams, and provides the API to interact with
the underlying device.
If a system has multiple instances of the same hardware attached, each has its
own instance of the camera class.
The API exposes full control of the device to upper layers of libcamera through
the public API, making it the highest level object libcamera exposes, and the
object that all other API operations interact with from configuration to
capture.
Read the `Camera API`_ documentation for more details.
.. _Camera API: https://libcamera.org/api-html/classlibcamera_1_1Camera.html
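To illustrate the lifecycle, a capture session typically follows an
acquire/configure/start sequence on the Camera. Continuing from the manager
sketch above, and with error checking omitted, a single viewfinder stream is
set up as follows:

.. code-block:: cpp

   std::shared_ptr<libcamera::Camera> camera = manager.cameras()[0];
   camera->acquire();

   std::unique_ptr<libcamera::CameraConfiguration> config =
           camera->generateConfiguration({ libcamera::StreamRole::Viewfinder });
   config->validate();
   camera->configure(config.get());

   camera->start();
   /* ... queue Requests and process completed frames ... */
   camera->stop();
   camera->release();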
Pipeline Handler
The Pipeline Handler manages the complex pipelines exposed by the kernel
drivers through the Media Controller and V4L2 APIs. It abstracts pipeline
handling to hide device-specific details from the rest of the library, and
implements both pipeline configuration based on stream configuration, and
pipeline runtime execution and scheduling when needed by the device.
The Pipeline Handler lives in the same process as the rest of the library, and
has access to all helpers and kernel camera-related devices.
Hardware abstraction is handled by device-specific Pipeline Handlers, which
are derived from the Pipeline Handler base class, allowing common
functionality to be shared among the implementations.
Derived pipeline handlers create Camera device instances based on the devices
they detect and support on the running system, and are responsible for
managing the interactions with a camera device.
More details can be found in the `PipelineHandler API`_ documentation, and the
`Pipeline Handler Writers Guide`_.
.. _PipelineHandler API: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html
Image Processing Algorithms
An image processing algorithm (IPA) component is a loadable plugin that
implements 3A (Auto-Exposure, Auto-White Balance, and Auto-Focus) and other
algorithms.
The algorithms run on the CPU and interact with the camera devices through the
Pipeline Handler to control hardware image processing based on the parameters
supplied by upper layers, maintaining state and closing the control loop
of the ISP.
The component is sandboxed and can only interact with libcamera through the
API provided by the Pipeline Handler; an IPA has no direct access to kernel
camera devices.
Open source IPA modules built with libcamera can be run in the same process
space as libcamera; external IPA modules, however, are run in a separate
process from the main libcamera process. IPA modules have a restricted view of
the system, including no access to networking APIs and limited access to file
systems.
IPA modules are only required for platforms and devices with an ISP controlled
by the host CPU. Camera sensors which have an integrated ISP are not
controlled through the IPA module.
Platform Support
----------------
The library currently supports the following hardware platforms specifically
with dedicated pipeline handlers:
- Intel IPU3 (ipu3)
- Rockchip RK3399 (rkisp1)
- Raspberry Pi 3 and 4 (rpi/vc4)
Furthermore, generic platform support is provided for the following:
- USB video device class cameras (uvcvideo)
- iMX7, Allwinner Sun6i (simple)
- Virtual media controller driver for test use cases (vimc)
Licensing
---------
The libcamera core is covered by the `LGPL-2.1-or-later`_ license. Pipeline
Handlers are a part of the libcamera code base and need to be contributed
upstream by device vendors. IPA modules included in libcamera are covered by a
free software license, however third-parties may develop IPA modules outside of
libcamera and distribute them under a closed-source license, provided they do
not include source code from the libcamera project.
The libcamera project itself contains multiple libraries, applications and
utilities. Licenses are expressed through SPDX tags in text-based files that
support comments, and through the .reuse/dep5 file otherwise. Copies of all
licenses are stored in the LICENSES directory, and a full summary of the
licensing used throughout the project can be found in the COPYING.rst document.
Applications which link dynamically against libcamera and use only the public
API are an independent work of the authors and have no license restrictions
imposed upon them from libcamera.
.. _LGPL-2.1-or-later: https://spdx.org/licenses/LGPL-2.1-or-later.html
View file
@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: ../documentation-contents.rst
IPA Writer's Guide
==================
View file
@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: ../documentation-contents.rst
Pipeline Handler Writers Guide
==============================
@ -153,14 +151,13 @@ integrates with the libcamera build system, and a *vivid.cpp* file that matches
the name of the pipeline.
In the *meson.build* file, add the *vivid.cpp* file as a build source for
libcamera by adding it to the global meson ``libcamera_internal_sources``
variable:
libcamera by adding it to the global meson ``libcamera_sources`` variable:
.. code-block:: none
# SPDX-License-Identifier: CC0-1.0
libcamera_internal_sources += files([
libcamera_sources += files([
'vivid.cpp',
])
@ -186,7 +183,7 @@ to the libcamera build options in the top level ``meson_options.txt``.
option('pipelines',
type : 'array',
choices : ['ipu3', 'rkisp1', 'rpi/pisp', 'rpi/vc4', 'simple', 'uvcvideo', 'vimc', 'vivid'],
choices : ['ipu3', 'rkisp1', 'rpi/vc4', 'simple', 'uvcvideo', 'vimc', 'vivid'],
description : 'Select which pipeline handlers to include')
@ -213,7 +210,7 @@ implementations for the overridden class members.
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
int start(Camera *camera, const ControlList *controls) override;
void stopDevice(Camera *camera) override;
void stop(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
@ -247,7 +244,7 @@ implementations for the overridden class members.
return -1;
}
void PipelineHandlerVivid::stopDevice(Camera *camera)
void PipelineHandlerVivid::stop(Camera *camera)
{
}
@ -521,14 +518,14 @@ handler and camera manager using `registerCamera`_.
Finally with a successful construction, we return 'true' indicating that the
PipelineHandler successfully matched and constructed a device.
.. _Camera::create: https://libcamera.org/internal-api-html/classlibcamera_1_1Camera.html#adf5e6c22411f953bfaa1ae21155d6c31
.. _Camera::create: https://libcamera.org/api-html/classlibcamera_1_1Camera.html#a453740e0d2a2f495048ae307a85a2574
.. _registerCamera: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html#adf02a7f1bbd87aca73c0e8d8e0e6c98b
.. code-block:: cpp
std::set<Stream *> streams{ &data->stream_ };
std::shared_ptr<Camera> camera = Camera::create(std::move(data), data->video_->deviceName(), streams);
registerCamera(std::move(camera));
std::shared_ptr<Camera> camera = Camera::create(this, data->video_->deviceName(), streams);
registerCamera(std::move(camera), std::move(data));
return true;
@ -554,7 +551,8 @@ Our match function should now look like the following:
/* Create and register the camera. */
std::set<Stream *> streams{ &data->stream_ };
std::shared_ptr<Camera> camera = Camera::create(std::move(data), data->video_->deviceName(), streams);
const std::string &id = data->video_->deviceName();
std::shared_ptr<Camera> camera = Camera::create(data.release(), id, streams);
registerCamera(std::move(camera));
return true;
@ -592,11 +590,11 @@ immutable properties of the ``Camera`` device.
The libcamera controls and properties are defined in YAML form which is
processed to automatically generate documentation and interfaces. Controls are
defined by the src/libcamera/`control_ids_core.yaml`_ file and camera properties
are defined by src/libcamera/`property_ids_core.yaml`_.
are defined by src/libcamera/`properties_ids_core.yaml`_.
.. _controls framework: https://libcamera.org/api-html/controls_8h.html
.. _control_ids_core.yaml: https://libcamera.org/api-html/control__ids_8h.html
.. _property_ids_core.yaml: https://libcamera.org/api-html/property__ids_8h.html
.. _properties_ids_core.yaml: https://libcamera.org/api-html/property__ids_8h.html
Pipeline handlers can optionally register the list of controls an application
can set as well as a list of immutable camera properties. Being both
@ -799,7 +797,8 @@ derived class, and assign it to a base class pointer.
.. code-block:: cpp
auto config = std::make_unique<VividCameraConfiguration>();
VividCameraData *data = cameraData(camera);
CameraConfiguration *config = new VividCameraConfiguration();
A ``CameraConfiguration`` is specific to each pipeline, so you can only create
it from the pipeline handler code path. Applications can also generate an empty
@ -827,7 +826,9 @@ To generate a ``StreamConfiguration``, you need a list of pixel formats and
frame sizes which are supported as outputs of the stream. You can fetch a map of
the ``V4L2PixelFormat`` and ``SizeRange`` supported by the underlying output
device, but the pipeline handler needs to convert this to a
``libcamera::PixelFormat`` type to pass to applications.
``libcamera::PixelFormat`` type to pass to applications. We do this here using
``std::transform`` to convert the formats and populate a new ``PixelFormat`` map
as shown below.
Continue adding the following code example to our ``generateConfiguration``
implementation.
@ -837,12 +838,14 @@ implementation.
std::map<V4L2PixelFormat, std::vector<SizeRange>> v4l2Formats =
data->video_->formats();
std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
for (auto &[v4l2PixelFormat, sizes] : v4l2Formats) {
PixelFormat pixelFormat = v4l2PixelFormat.toPixelFormat();
if (pixelFormat.isValid())
deviceFormats.try_emplace(pixelFormat, std::move(sizes));
}
std::transform(v4l2Formats.begin(), v4l2Formats.end(),
std::inserter(deviceFormats, deviceFormats.begin()),
[&](const decltype(v4l2Formats)::value_type &format) {
return decltype(deviceFormats)::value_type{
format.first.toPixelFormat(),
format.second
};
});
The `StreamFormats`_ class holds information about the pixel formats and frame
sizes that a stream can support. The class groups size information by the pixel
@ -932,9 +935,9 @@ Add the following function implementation to your file:
StreamConfiguration &cfg = config_[0];
const std::vector<libcamera::PixelFormat> &formats = cfg.formats().pixelformats();
const std::vector<libcamera::PixelFormat> formats = cfg.formats().pixelformats();
if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) == formats.end()) {
cfg.pixelFormat = formats[0];
cfg.pixelFormat = cfg.formats().pixelformats()[0];
LOG(VIVID, Debug) << "Adjusting format to " << cfg.pixelFormat.toString();
status = Adjusted;
}
@ -1152,7 +1155,7 @@ available to the devices which have to be started and ready to produce
images. At the end of a capture session the ``Camera`` device needs to be
stopped, to gracefully clean up any allocated memory and stop the hardware
devices. Pipeline handlers implement two functions for these purposes, the
``start()`` and ``stopDevice()`` functions.
``start()`` and ``stop()`` functions.
The memory initialization phase that happens at ``start()`` time serves to
configure video devices to be able to use memory buffers exported as dma-buf
@ -1255,8 +1258,8 @@ algorithms, or other devices you should also stop them.
.. _releaseBuffers: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#a191619c152f764e03bc461611f3fcd35
Of course we also need to handle the corresponding actions to stop streaming on
a device. Add the following to the ``stopDevice()`` function, to stop the
stream with the `streamOff`_ function and release all buffers.
a device. Add the following to the ``stop`` function, to stop the stream with
the `streamOff`_ function and release all buffers.
.. _streamOff: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#a61998710615bdf7aa25a046c8565ed66
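Put together, and following the guide's naming, the resulting function is a
short sketch along these lines:

.. code-block:: cpp

   void PipelineHandlerVivid::stopDevice(Camera *camera)
   {
           VividCameraData *data = cameraData(camera);

           /* Stop the hardware stream, then return its buffers. */
           data->video_->streamOff();
           data->video_->releaseBuffers();
   }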
@ -1344,7 +1347,7 @@ before being set.
continue;
}
int32_t value = std::lround(it.second.get<float>() * 128 + offset);
int32_t value = lroundf(it.second.get<float>() * 128 + offset);
controls.set(cid, std::clamp(value, 0, 255));
}
@ -1408,7 +1411,7 @@ value translation operations:
.. code-block:: cpp
#include <cmath>
#include <math.h>
Frame completion and event handling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
View file
@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: ../documentation-contents.rst
Tracing Guide
=============
View file
@ -1,31 +1,27 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: introduction.rst
.. Front page matter is defined in the project README file.
.. include:: ../README.rst
:start-after: .. section-begin-libcamera
:end-before: .. section-end-libcamera
.. toctree::
:maxdepth: 1
:caption: Contents:
Home <self>
Docs <docs>
Contribute <contributing>
Getting Started <getting-started>
Developer Guide <guides/introduction>
Application Writer's Guide <guides/application-developer>
Camera Sensor Model <camera-sensor-model>
Environment variables <environment_variables>
Feature Requirements <feature_requirements>
IPA Writer's guide <guides/ipa>
Lens driver requirements <lens_driver_requirements>
libcamera Architecture <libcamera_architecture>
Pipeline Handler Writer's Guide <guides/pipeline-handler>
Python Bindings <python-bindings>
Sensor driver requirements <sensor_driver_requirements>
SoftwareISP Benchmarking <software-isp-benchmarking>
IPA Writer's guide <guides/ipa>
Tracing guide <guides/tracing>
Design document: AE <design/ae>
.. toctree::
:hidden:
introduction
Environment variables <environment_variables>
Sensor driver requirements <sensor_driver_requirements>
Lens driver requirements <lens_driver_requirements>
Python Bindings <python-bindings>
Camera Sensor Model <camera-sensor-model>
SoftwareISP Benchmarking <software-isp-benchmarking>
View file
@ -1,8 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. _internal-api:
Internal API Reference
======================
:: Placeholder for Doxygen documentation
View file
@ -1,224 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
************
Introduction
************
.. toctree::
:hidden:
API <api-html/index>
Internal API <internal-api-html/index>
What is libcamera?
==================
libcamera is an open source complex camera support library for Linux, Android
and ChromeOS. The library interfaces with Linux kernel device drivers and
provides an intuitive API to developers in order to simplify the complexity
involved in capturing images from complex cameras on Linux systems.
What is a "complex camera"?
===========================
A modern "camera" tends in fact to be several different pieces of hardware which
must all be controlled together in order to produce and capture images of
appropriate quality. A hardware pipeline typically consists of a camera sensor
that captures raw frames and transmits them on a bus, a receiver that decodes
the bus signals, and an image signal processor that processes raw frames to
produce usable images in a standard format. The Linux kernel handles these
multimedia devices through the 'Linux media' subsystem and provides a set of
application programming interfaces known collectively as the
V4L2 (`Video for Linux 2`_) and the `Media Controller`_ APIs, which provide an
interface to interact with and control media devices.
.. _Video for Linux 2: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/v4l/v4l2.html
.. _Media Controller: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/mediactl/media-controller.html
Included in this subsystem are drivers for camera sensors, CSI2 (Camera
Serial Interface) receivers, and ISPs (Image Signal Processors).
The usage of these drivers to provide a functioning camera stack is a
responsibility that lies in userspace, and is commonly implemented separately
by vendors without a common architecture or API for application developers. This
adds a lot of complexity to the task, particularly when considering that the
differences in hardware pipelines and their representation in the kernel's APIs
often necessitate bespoke handling.
What is libcamera for?
======================
libcamera provides a complete camera stack for Linux-based systems to abstract
the configuration of hardware and image control algorithms required to obtain
desirable results from the camera through the kernel's APIs, reducing those
operations to a simple and consistent method for developers. In short, instead of
having to deal with this:
.. graphviz:: mali-c55.dot
you can instead simply deal with:
.. code-block:: python
>>> import libcamera as lc
>>> camera_manager = lc.CameraManager.singleton()
[0:15:59.582029920] [504] INFO Camera camera_manager.cpp:313 libcamera v0.3.0+182-01e57380
>>> for camera in camera_manager.cameras:
... print(f' - {camera.id}')
...
- mali-c55 tpg
- imx415 1-001a
The library handles the rest for you. These documentation pages give more
information on the internal workings of libcamera (and the kernel camera stack
that lies behind it) as well as guidance on using libcamera in an application or
extending the library with support for your hardware (through the pipeline
handler and IPA module writer's guides).
How should I use it?
====================
There are a few ways you might want to use libcamera, depending on your
application. It's always possible to use the library directly, and you can find
detailed information on how to do so in the
:doc:`application writer's guide <guides/application-developer>`.
It is often more appropriate to use one of the frameworks with libcamera
support. For example an application powering an embedded media device
incorporating capture, encoding and streaming of both video and audio would
benefit from using `GStreamer`_, for which libcamera provides a plugin.
Similarly, an application for user-facing devices like a laptop would likely
benefit from accessing cameras through the XDG camera portal and `pipewire`_,
which
brings the advantages of resource sharing (multiple applications accessing the
stream at the same time) and access control.
.. _GStreamer: https://gstreamer.freedesktop.org/
.. _pipewire: https://pipewire.org/
Camera Stack
============
::
a c / +-------------+ +-------------+ +-------------+ +-------------+
p a | | Native | | Framework | | Native | | Android |
p t | | V4L2 | | Application | | libcamera | | Camera |
l i | | Application | | (gstreamer) | | Application | | Framework |
i o \ +-------------+ +-------------+ +-------------+ +-------------+
n ^ ^ ^ ^
| | | |
l a | | | |
i d v v | v
b a / +-------------+ +-------------+ | +-------------+
c p | | V4L2 | | Camera | | | Android |
a t | | Compat. | | Framework | | | Camera |
m a | | | | (gstreamer) | | | HAL |
e t \ +-------------+ +-------------+ | +-------------+
r i ^ ^ | ^
a o | | | |
n | | | |
/ | ,................................................
| | ! : Language : !
l f | | ! : Bindings : !
i r | | ! : (optional) : !
b a | | \...............................................'
c m | | | | |
a e | | | | |
m w | v v v v
e o | +----------------------------------------------------------------+
r r | | |
a k | | libcamera |
| | |
\ +----------------------------------------------------------------+
^ ^ ^
Userspace | | |
------------------------ | ---------------- | ---------------- | ---------------
Kernel | | |
v v v
+-----------+ +-----------+ +-----------+
| Media | <--> | Video | <--> | V4L2 |
| Device | | Device | | Subdev |
+-----------+ +-----------+ +-----------+
The camera stack comprises four software layers. From bottom to top:
* The kernel drivers control the camera hardware and expose a
low-level interface to userspace through the Linux kernel V4L2
family of APIs (Media Controller API, V4L2 Video Device API and
V4L2 Subdev API).
* The libcamera framework is the core part of the stack. It
handles all control of the camera devices in its core component,
libcamera, and exposes a native C++ API to upper layers. Optional
language bindings allow interfacing to libcamera from other
programming languages.
Those components live in the same source code repository and
all together constitute the libcamera framework.
* The libcamera adaptation is an umbrella term designating the
components that interface to libcamera in other frameworks.
Notable examples are a V4L2 compatibility layer, a gstreamer
libcamera element, and an Android camera HAL implementation based
on libcamera.
Those components can live in the libcamera project source code
in separate repositories, or move to their respective project's
repository (for instance the gstreamer libcamera element).
* The applications and upper level frameworks are based on the
libcamera framework or libcamera adaptation, and are outside of
the scope of the libcamera project.
V4L2 Compatibility Layer
V4L2 compatibility is achieved through a shared library that traps all
accesses to camera devices and routes them to libcamera to emulate high-level
V4L2 camera devices. It is injected into a process's address space through
``LD_PRELOAD`` and is completely transparent to applications.
The compatibility layer exposes camera device features on a best-effort basis,
and aims for the level of features traditionally available from a UVC camera
designed for video conferencing.
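As an example, assuming the layer is installed as ``v4l2-compat.so`` in the
libcamera library directory, an unmodified V4L2 application can be redirected
to libcamera as follows:

.. code-block:: shell

   LD_PRELOAD=/usr/lib/libcamera/v4l2-compat.so v4l2-ctl --list-formats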
Android Camera HAL
Camera support for Android is achieved through a generic Android camera HAL
implementation on top of libcamera. The HAL implements features required by
Android and out of scope from libcamera, such as JPEG encoding support.
This component is used to provide support for ChromeOS platforms.
GStreamer element (gstlibcamerasrc)
A `GStreamer element`_ is provided to allow capture from libcamera-supported
devices through GStreamer pipelines, and to connect to other elements for
further processing.
Native libcamera API
Applications can make use of the libcamera API directly using the C++
API. An example application and walkthrough using the libcamera API can be
followed in the :doc:`Application writer's guide </guides/application-developer>`
.. _GStreamer element: https://gstreamer.freedesktop.org/documentation/application-development/basics/elements.html
Licensing
=========
The libcamera core is covered by the `LGPL-2.1-or-later`_ license. Pipeline
Handlers are a part of the libcamera code base and need to be contributed
upstream by device vendors. IPA modules included in libcamera are covered by a
free software license, however third-parties may develop IPA modules outside of
libcamera and distribute them under a closed-source license, provided they do
not include source code from the libcamera project.
The libcamera project itself contains multiple libraries, applications and
utilities. Licenses are expressed through SPDX tags in text-based files that
support comments, and through the .reuse/dep5 file otherwise. Copies of all
licenses are stored in the LICENSES directory, and a full summary of the
licensing used throughout the project can be found in the COPYING.rst document.
Applications which link dynamically against libcamera and use only the public
API are an independent work of the authors and have no license restrictions
imposed upon them from libcamera.
.. _LGPL-2.1-or-later: https://spdx.org/licenses/LGPL-2.1-or-later.html
View file
@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _lens-driver-requirements:
Lens Driver Requirements
View file
@ -1,168 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
libcamera Architecture
======================
While offering a unified API towards upper layers, and presenting itself as a
single library, libcamera isn't monolithic. It exposes multiple components
through its public API and is built around a set of separate helpers internally.
Hardware abstractions are handled through the use of device-specific components
where required and dynamically loadable plugins are used to separate image
processing algorithms from the core libcamera codebase.
::
--------------------------< libcamera Public API >---------------------------
^ ^
| |
v v
+-------------+ +---------------------------------------------------+
| Camera | | Camera Device |
| Manager | | +-----------------------------------------------+ |
+-------------+ | | Device-Agnostic | |
^ | | | |
| | | +--------------------------+ |
| | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
| | | | { +-----------------+ } |
| | | | } | //// Image //// | { |
| | | | <-> | / Processing // | } |
| | | | } | / Algorithms // | { |
| | | | { +-----------------+ } |
| | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
| | | | ========================== |
| | | | +-----------------+ |
| | | | | // Pipeline /// | |
| | | | <-> | /// Handler /// | |
| | | | | /////////////// | |
| | +--------------------+ +-----------------+ |
| | Device-Specific |
| +---------------------------------------------------+
| ^ ^
| | |
v v v
+--------------------------------------------------------------------+
| Helpers and Support Classes |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
| | Support | | Allocator | | IPC | | Manager | |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| +-------------+ +-------------+ |
| | Pipeline | | ... | |
| | Runner | | | |
| +-------------+ +-------------+ |
+--------------------------------------------------------------------+
/// Device-Specific Components
~~~ Sandboxing
Camera Manager
The Camera Manager enumerates cameras and instantiates Pipeline Handlers to
manage each Camera that libcamera supports. The Camera Manager supports
hotplug detection and notification events when supported by the underlying
kernel devices.
There is only ever one instance of the Camera Manager running per application.
Each application's instance of the Camera Manager ensures that only a single
application can take control of a camera device at once.
Read the `Camera Manager API`_ documentation for more details.
.. _Camera Manager API: https://libcamera.org/api-html/classlibcamera_1_1CameraManager.html
Camera Device
The Camera class represents a single item of camera hardware that is capable
of producing one or more image streams, and provides the API to interact with
the underlying device.
If a system has multiple instances of the same hardware attached, each has its
own instance of the camera class.
The API exposes full control of the device to upper layers of libcamera through
the public API, making it the highest level object libcamera exposes, and the
object that all other API operations interact with from configuration to
capture.
Read the `Camera API`_ documentation for more details.
.. _Camera API: https://libcamera.org/api-html/classlibcamera_1_1Camera.html
Pipeline Handler
The Pipeline Handler manages the complex pipelines exposed by the kernel
drivers through the Media Controller and V4L2 APIs. It abstracts pipeline
handling to hide device-specific details from the rest of the library, and
implements both pipeline configuration based on stream configuration, and
pipeline runtime execution and scheduling when needed by the device.
The Pipeline Handler lives in the same process as the rest of the library, and
has access to all helpers and kernel camera-related devices.
Hardware abstraction is handled by device-specific Pipeline Handlers, which
are derived from the Pipeline Handler base class, allowing common
functionality to be shared among the implementations.
Derived pipeline handlers create Camera device instances based on the devices
they detect and support on the running system, and are responsible for
managing the interactions with a camera device.
More details can be found in the `PipelineHandler API`_ documentation, and the
:doc:`Pipeline Handler Writers Guide <guides/pipeline-handler>`.
.. _PipelineHandler API: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html
Image Processing Algorithms
Together with the hardware image processing and hardware statistics
collection, the Image Processing Algorithms (IPA) implement 3A (Auto-Exposure,
Auto-White Balance and Auto-Focus) and other algorithms. They run on the CPU
and control hardware image processing based on the parameters supplied by
upper layers, closing the control loop of the ISP.
IPAs are loaded as external plugins named IPA Modules. IPA Modules can be part
of the libcamera code base or provided externally by camera vendors as
open-source or closed-source components.
Open source IPA Modules built with libcamera are run in the same process space
as libcamera. External IPA Modules are run in a separate sandboxed process. In
either case, they can only interact with libcamera through the API provided by
the Pipeline Handler. They have a restricted view of the system, with no direct
access to kernel camera devices, no access to networking APIs, and limited
access to file systems. All their accesses to image and metadata are mediated
by dmabuf instances explicitly passed by the Pipeline Handler to the IPA
Module.
IPA Modules are only required for platforms and devices with an ISP controlled
by the host CPU. Camera sensors which have an integrated ISP are not
controlled through the IPA Module.
Helpers and Support Classes
While Pipeline Handlers are device-specific, implementations are expected to
share code due to usage of identical APIs towards the kernel camera drivers
and the Image Processing Algorithms. This includes without limitation handling
of the MC and V4L2 APIs, buffer management through dmabuf, and pipeline
discovery, configuration and scheduling. Such code will be factored out to
helpers when applicable.
Other parts of libcamera will also benefit from factoring code out to
self-contained support classes, even if such code is present only once in the
code base, in order to keep the source code clean and easy to read. This
should be the case for instance for plugin management.
Platform Support
----------------
The library currently supports the following hardware platforms specifically
with dedicated pipeline handlers:
- Arm Mali-C55
- Intel IPU3 (ipu3)
- NXP i.MX8MP (imx8-isi and rkisp1)
- Raspberry Pi 3, 4 and Zero (rpi/vc4)
- Rockchip RK3399 (rkisp1)
Furthermore, generic platform support is provided for the following:
- USB video device class cameras (uvcvideo)
- iMX7, IPU6, Allwinner Sun6i (simple)
- Virtual media controller driver for test use cases (vimc)
View file
@ -1,33 +0,0 @@
/**
\mainpage libcamera API reference
Welcome to the API reference for <a href="https://libcamera.org/">libcamera</a>,
a complex camera support library for Linux, Android and ChromeOS. These pages
are automatically generated from the libcamera source code and describe the API
in detail - if this is your first interaction with libcamera then you may find
it useful to visit the [documentation](../introduction.html) in
the first instance, which can provide a more generic introduction to the
library's concepts.
\if internal
As a follow-on to the developer's guide, to assist you in adding support for
your platform, the [pipeline handler writer's guide](../guides/pipeline-handler.html)
and the [ipa module writer's guide](../guides/ipa.html) should be helpful.
The full libcamera API is documented here. If you wish to see only the public
part of the API you can use [these pages](../api-html/index.html) instead.
\else
As a follow-on to the developer's guide, to assist you in using libcamera within
your project, the [application developer's guide](../guides/application-developer.html)
gives an overview of how to achieve that.
Only the public part of the libcamera API is documented here; if you are a
developer seeking to add support for your hardware to the library or make other
improvements, you should switch to the internal API
[reference pages](../internal-api-html/index.html) instead.
\endif
*/
View file
@ -1,25 +0,0 @@
/* SPDX-License-Identifier: CC-BY-SA-4.0 */
digraph board {
rankdir=TB
n00000001 [label="{{} | mali-c55 tpg\n/dev/v4l-subdev0 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
n00000001:port0 -> n00000003:port0 [style=dashed]
n00000003 [label="{{<port0> 0 | <port4> 4} | mali-c55 isp\n/dev/v4l-subdev1 | {<port1> 1 | <port2> 2 | <port3> 3}}", shape=Mrecord, style=filled, fillcolor=green]
n00000003:port1 -> n00000009:port0 [style=bold]
n00000003:port2 -> n00000009:port2 [style=bold]
n00000003:port1 -> n0000000d:port0 [style=bold]
n00000003:port3 -> n0000001c
n00000009 [label="{{<port0> 0 | <port2> 2} | mali-c55 resizer fr\n/dev/v4l-subdev2 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
n00000009:port1 -> n00000010
n0000000d [label="{{<port0> 0} | mali-c55 resizer ds\n/dev/v4l-subdev3 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
n0000000d:port1 -> n00000014
n00000010 [label="mali-c55 fr\n/dev/video0", shape=box, style=filled, fillcolor=yellow]
n00000014 [label="mali-c55 ds\n/dev/video1", shape=box, style=filled, fillcolor=yellow]
n00000018 [label="mali-c55 3a params\n/dev/video2", shape=box, style=filled, fillcolor=yellow]
n00000018 -> n00000003:port4
n0000001c [label="mali-c55 3a stats\n/dev/video3", shape=box, style=filled, fillcolor=yellow]
n00000030 [label="{{<port0> 0} | lte-csi2-rx\n/dev/v4l-subdev4 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
n00000030:port1 -> n00000003:port0
n00000035 [label="{{} | imx415 1-001a\n/dev/v4l-subdev5 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
n00000035:port0 -> n00000030:port0 [style=bold]
}
View file
@ -24,100 +24,44 @@ if doxygen.found() and dot.found()
cdata.set('PREDEFINED', ' \\\n\t\t\t '.join(doxygen_predefined))
doxyfile_common = configure_file(input : 'Doxyfile-common.in',
output : 'Doxyfile-common',
doxyfile = configure_file(input : 'Doxyfile.in',
output : 'Doxyfile',
configuration : cdata)
doxygen_public_input = [
libcamera_base_public_headers,
libcamera_base_public_sources,
libcamera_public_headers,
libcamera_public_sources,
]
doxygen_internal_input = [
libcamera_base_private_headers,
libcamera_base_internal_sources,
doxygen_input = [
doxyfile,
libcamera_base_headers,
libcamera_base_sources,
libcamera_internal_headers,
libcamera_internal_sources,
libcamera_ipa_headers,
libcamera_ipa_interfaces,
libcamera_public_headers,
libcamera_sources,
libipa_headers,
libipa_sources,
]
if is_variable('ipu3_ipa_sources')
doxygen_internal_input += [ipu3_ipa_sources]
doxygen_input += [ipu3_ipa_sources]
endif
# We run doxygen twice - the first run excludes internal API objects as it
# is intended to document the public API only. A second run covers all of
# the library's objects for libcamera developers. Common configuration is
# set in an initially generated Doxyfile, which is then included by the two
# final Doxyfiles.
# This is the "public" run of doxygen generating an abridged version of the
# API's documentation.
doxyfile_tmpl = configure_file(input : 'Doxyfile-public.in',
output : 'Doxyfile-public.tmpl',
configuration : cdata)
# The set of public input files stored in the doxygen_public_input array
# needs to be set in Doxyfile-public. We can't pass them through
# cdata, as some of the array members are custom_tgt instances, which
# configuration_data.set() doesn't support. Using a separate script invoked
# through custom_target(), which supports custom_tgt instances as inputs.
doxyfile = custom_target('doxyfile-public',
input : [
doxygen_public_input,
],
output : 'Doxyfile-public',
command : [
'gen-doxyfile.py',
'-o', '@OUTPUT@',
doxyfile_tmpl,
'@INPUT@',
])
custom_target('doxygen-public',
input : [
doxyfile,
doxyfile_common,
],
custom_target('doxygen',
input : doxygen_input,
output : 'api-html',
command : [doxygen, doxyfile],
install : true,
install_dir : doc_install_dir,
install_tag : 'doc')
# This is the internal documentation, which hard-codes a list of directories
# to parse in its doxyfile.
doxyfile = configure_file(input : 'Doxyfile-internal.in',
output : 'Doxyfile-internal',
configuration : cdata)
custom_target('doxygen-internal',
input : [
doxyfile,
doxyfile_common,
doxygen_internal_input,
],
output : 'internal-api-html',
command : [doxygen, doxyfile],
install : true,
install_dir : doc_install_dir,
install_tag : 'doc-internal')
endif
#
# Sphinx
#
sphinx = find_program('sphinx-build-3', 'sphinx-build',
required : get_option('documentation'))
sphinx = find_program('sphinx-build-3', required : false)
if not sphinx.found()
sphinx = find_program('sphinx-build', required : get_option('documentation'))
endif
if sphinx.found()
docs_sources = [
@ -126,19 +70,15 @@ if sphinx.found()
'coding-style.rst',
'conf.py',
'contributing.rst',
'design/ae.rst',
'documentation-contents.rst',
'docs.rst',
'environment_variables.rst',
'feature_requirements.rst',
'guides/application-developer.rst',
'guides/introduction.rst',
'guides/ipa.rst',
'guides/pipeline-handler.rst',
'guides/tracing.rst',
'index.rst',
'introduction.rst',
'lens_driver_requirements.rst',
'libcamera_architecture.rst',
'mali-c55.dot',
'python-bindings.rst',
'sensor_driver_requirements.rst',
'software-isp-benchmarking.rst',
View file
@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _python-bindings:
Python Bindings for libcamera
View file
@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _sensor-driver-requirements:
Sensor Driver Requirements
View file
@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _software-isp-benchmarking:
Software ISP benchmarking
View file
@ -283,13 +283,9 @@ div#signature {
font-size: 12px;
}
#licensing div.toctree-wrapper {
#libcamera div.toctree-wrapper {
height: 0px;
margin: 0px;
padding: 0px;
visibility: hidden;
}
.documentation-nav {
display: none;
}
View file
@ -1,44 +0,0 @@
/**
* \page thread-safety Reentrancy and Thread-Safety
*
* Through the documentation, several terms are used to define how classes and
* their member functions can be used from multiple threads.
*
* - A **reentrant** function may be called simultaneously from multiple
* threads if and only if each invocation uses a different instance of the
* class. This is the default for all member functions not explicitly marked
* otherwise.
*
* - \anchor thread-safe A **thread-safe** function may be called
* simultaneously from multiple threads on the same instance of a class. A
* thread-safe function is thus reentrant. Thread-safe functions may also be
* called simultaneously with any other reentrant function of the same class
* on the same instance.
*
* \internal
* - \anchor thread-bound A **thread-bound** function may be called only from
* the thread that the class instances lives in (see section \ref
* thread-objects). For instances of classes that do not derive from the
* Object class, this is the thread in which the instance was created. A
* thread-bound function is not thread-safe, and may or may not be reentrant.
* \endinternal
*
* Neither reentrancy nor thread-safety, in this context, means that a function
* may be called simultaneously from the same thread, for instance from a
* callback invoked by the function. This may deadlock and isn't allowed unless
* separately documented.
*
* \if internal
* A class is defined as reentrant, thread-safe or thread-bound if all its
* member functions are reentrant, thread-safe or thread-bound respectively.
* \else
* A class is defined as reentrant or thread-safe if all its member functions
* are reentrant or thread-safe respectively.
* \endif
* Some member functions may additionally be documented as having additional
* thread-related attributes.
*
* Most classes are reentrant but not thread-safe, as making them fully
* thread-safe would incur locking costs considered prohibitive for the
* expected use cases.
*/
View file
@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. section-begin-libcamera
===========
libcamera
===========
@ -20,6 +22,7 @@ open-source-friendly while still protecting vendor core IP. libcamera was born
out of that collaboration and will offer modern camera support to Linux-based
systems, including traditional Linux distributions, ChromeOS and Android.
.. section-end-libcamera
.. section-begin-getting-started
Getting Started
@ -44,7 +47,7 @@ A C++ toolchain: [required]
Either {g++, clang}
Meson Build system: [required]
meson (>= 0.63) ninja-build pkg-config
meson (>= 0.60) ninja-build pkg-config
for the libcamera core: [required]
libyaml-dev python3-yaml python3-ply python3-jinja2
@ -83,10 +86,9 @@ for cam: [optional]
- libdrm-dev: Enables the KMS sink
- libjpeg-dev: Enables MJPEG on the SDL sink
- libsdl2-dev: Enables the SDL sink
- libtiff-dev: Enables writing DNG
for qcam: [optional]
libtiff-dev qt6-base-dev
libtiff-dev qtbase5-dev qttools5-dev-tools
for tracing with lttng: [optional]
liblttng-ust-dev python3-jinja2 lttng-tools
@ -94,6 +96,9 @@ for tracing with lttng: [optional]
for android: [optional]
libexif-dev libjpeg-dev
for Python bindings: [optional]
pybind11-dev
for lc-compliance: [optional]
libevent-dev libgtest-dev
@ -173,22 +178,6 @@ Which can be received on another device over the network with:
gst-launch-1.0 tcpclientsrc host=$DEVICE_IP port=5000 ! \
multipartdemux ! jpegdec ! autovideosink
The GStreamer element also supports multiple streams. This is achieved by
requesting additional source pads. Downstream caps filters can be used
to choose specific parameters like resolution and pixel format. The pad
property ``stream-role`` can be used to select a role.
The following example displays a 640x480 viewfinder while streaming
JPEG-encoded 800x600 video. You can use the receiver pipeline above to view the
remote stream from another device.
.. code::
gst-launch-1.0 libcamerasrc name=cs src::stream-role=view-finder src_0::stream-role=video-recording \
cs.src ! queue ! video/x-raw,width=640,height=480 ! videoconvert ! autovideosink \
cs.src_0 ! queue ! video/x-raw,width=800,height=600 ! videoconvert ! \
jpegenc ! multipartmux ! tcpserversink host=0.0.0.0 port=5000
.. section-end-getting-started
Troubleshooting
View file
@ -98,14 +98,20 @@ public:
using PackType = BoundMethodPack<R, Args...>;
private:
template<std::size_t... I>
void invokePack(BoundMethodPackBase *pack, std::index_sequence<I...>)
template<std::size_t... I, typename T = R>
std::enable_if_t<!std::is_void<T>::value, void>
invokePack(BoundMethodPackBase *pack, std::index_sequence<I...>)
{
[[maybe_unused]] auto *args = static_cast<PackType *>(pack);
if constexpr (!std::is_void_v<R>)
PackType *args = static_cast<PackType *>(pack);
args->ret_ = invoke(std::get<I>(args->args_)...);
else
}
template<std::size_t... I, typename T = R>
std::enable_if_t<std::is_void<T>::value, void>
invokePack(BoundMethodPackBase *pack, std::index_sequence<I...>)
{
/* args is effectively unused when the sequence I is empty. */
PackType *args [[gnu::unused]] = static_cast<PackType *>(pack);
invoke(std::get<I>(args->args_)...);
}
View file
@ -0,0 +1,14 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021, Google Inc.
*
* Compiler support
*/
#pragma once
#if __cplusplus >= 201703L
#define __nodiscard [[nodiscard]]
#else
#define __nodiscard
#endif
View file
@ -7,6 +7,8 @@
#pragma once
#include <vector>
#include <libcamera/base/private.h>
namespace libcamera {
View file
@ -7,11 +7,11 @@
#pragma once
#include <map>
#include <stdint.h>
#include <string>
#include <sys/types.h>
#include <map>
#include <string>
#include <libcamera/base/private.h>
#include <libcamera/base/class.h>
View file
@ -7,9 +7,8 @@
#pragma once
#include <atomic>
#include <chrono>
#include <sstream>
#include <string_view>
#include <libcamera/base/private.h>
@ -30,29 +29,25 @@ enum LogSeverity {
class LogCategory
{
public:
static LogCategory *create(std::string_view name);
static LogCategory *create(const char *name);
const std::string &name() const { return name_; }
LogSeverity severity() const { return severity_.load(std::memory_order_relaxed); }
void setSeverity(LogSeverity severity) { severity_.store(severity, std::memory_order_relaxed); }
LogSeverity severity() const { return severity_; }
void setSeverity(LogSeverity severity);
static const LogCategory &defaultCategory();
private:
friend class Logger;
explicit LogCategory(std::string_view name);
explicit LogCategory(const char *name);
const std::string name_;
std::atomic<LogSeverity> severity_;
static_assert(decltype(severity_)::is_always_lock_free);
LogSeverity severity_;
};
#define LOG_DECLARE_CATEGORY(name) \
extern const LogCategory &_LOG_CATEGORY(name)();
#define LOG_DEFINE_CATEGORY(name) \
LOG_DECLARE_CATEGORY(name) \
const LogCategory &_LOG_CATEGORY(name)() \
{ \
/* The instance will be deleted by the Logger destructor. */ \
@ -65,7 +60,9 @@ class LogMessage
public:
LogMessage(const char *fileName, unsigned int line,
const LogCategory &category, LogSeverity severity,
std::string prefix = {});
const std::string &prefix = std::string());
LogMessage(LogMessage &&);
~LogMessage();
std::ostream &stream() { return msgStream_; }
@ -78,7 +75,9 @@ public:
const std::string msg() const { return msgStream_.str(); }
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(LogMessage)
LIBCAMERA_DISABLE_COPY(LogMessage)
void init(const char *fileName, unsigned int line);
std::ostringstream msgStream_;
const LogCategory &category_;
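For context, these categories back the ``LOG()`` macros used throughout the
library. A hedged sketch, where ``MyPipeline`` and ``processFrame()`` are
hypothetical names:

.. code-block:: cpp

   /* In one translation unit, define the category... */
   LOG_DEFINE_CATEGORY(MyPipeline)

   /* ...then stream messages against it at a chosen severity. */
   void processFrame(unsigned int sequence)
   {
           LOG(MyPipeline, Debug) << "frame " << sequence << " completed";
   }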
View file
@ -1,32 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Ideas on Board Oy
*
* Anonymous file creation
*/
#pragma once
#include <libcamera/base/flags.h>
#include <libcamera/base/unique_fd.h>
namespace libcamera {
class MemFd
{
public:
enum class Seal {
None = 0,
Shrink = (1 << 0),
Grow = (1 << 1),
};
using Seals = Flags<Seal>;
static UniqueFD create(const char *name, std::size_t size,
Seals seals = Seal::None);
};
LIBCAMERA_FLAGS_ENABLE_OPERATORS(MemFd::Seal)
} /* namespace libcamera */
View file
@ -5,6 +5,7 @@ libcamera_base_include_dir = libcamera_include_dir / 'base'
libcamera_base_public_headers = files([
'bound_method.h',
'class.h',
'compiler.h',
'flags.h',
'object.h',
'shared_fd.h',
@ -20,7 +21,6 @@ libcamera_base_private_headers = files([
'event_notifier.h',
'file.h',
'log.h',
'memfd.h',
'message.h',
'mutex.h',
'private.h',
View file
@ -23,6 +23,10 @@ namespace libcamera {
class LIBCAMERA_TSA_CAPABILITY("mutex") Mutex final
{
public:
constexpr Mutex()
{
}
void lock() LIBCAMERA_TSA_ACQUIRE()
{
mutex_.lock();
@ -80,6 +84,10 @@ private:
class ConditionVariable final
{
public:
ConditionVariable()
{
}
void notify_one() noexcept
{
cv_.notify_one();
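These wrappers exist so that Clang's thread safety analysis can verify
locking. A hedged sketch, assuming the ``MutexLocker`` alias and the
``LIBCAMERA_TSA_GUARDED_BY`` annotation provided alongside these classes:

.. code-block:: cpp

   #include <vector>

   #include <libcamera/base/mutex.h>

   /* A hypothetical queue whose storage is guarded by a Mutex. */
   class Queue
   {
   public:
           void push(int value)
           {
                   libcamera::MutexLocker locker(mutex_);
                   values_.push_back(value);
           }

   private:
           libcamera::Mutex mutex_;
           std::vector<int> values_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
   };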
View file
@ -9,11 +9,9 @@
#include <list>
#include <memory>
#include <utility>
#include <vector>
#include <libcamera/base/bound_method.h>
#include <libcamera/base/class.h>
namespace libcamera {
@ -40,7 +38,7 @@ public:
{
T *obj = static_cast<T *>(this);
auto *method = new BoundMethodMember<T, R, FuncArgs...>(obj, this, func, type);
return method->activate(std::forward<Args>(args)..., true);
return method->activate(args..., true);
}
Thread *thread() const { return thread_; }
@ -54,8 +52,6 @@ protected:
bool assertThreadBound(const char *message);
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(Object)
friend class SignalBase;
friend class Thread;
View file
@ -10,6 +10,7 @@
#include <functional>
#include <list>
#include <type_traits>
#include <vector>
#include <libcamera/base/bound_method.h>
@ -63,8 +64,11 @@ public:
#ifndef __DOXYGEN__
template<typename T, typename Func,
std::enable_if_t<std::is_base_of<Object, T>::value &&
std::is_invocable_v<Func, Args...>> * = nullptr>
std::enable_if_t<std::is_base_of<Object, T>::value
#if __cplusplus >= 201703L
&& std::is_invocable_v<Func, Args...>
#endif
> * = nullptr>
void connect(T *obj, Func func, ConnectionType type = ConnectionTypeAuto)
{
Object *object = static_cast<Object *>(obj);
@ -72,8 +76,11 @@ public:
}
template<typename T, typename Func,
std::enable_if_t<!std::is_base_of<Object, T>::value &&
std::is_invocable_v<Func, Args...>> * = nullptr>
std::enable_if_t<!std::is_base_of<Object, T>::value
#if __cplusplus >= 201703L
&& std::is_invocable_v<Func, Args...>
#endif
> * = nullptr>
#else
template<typename T, typename Func>
#endif
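A brief usage sketch: a ``Signal`` carrying an ``int`` is connected to a
member function of an ``Object``-derived receiver and then emitted. The
``Receiver`` class and ``example()`` function are hypothetical:

.. code-block:: cpp

   #include <libcamera/base/object.h>
   #include <libcamera/base/signal.h>

   class Receiver : public libcamera::Object
   {
   public:
           void onValue(int value) { /* handle the value */ }
   };

   void example()
   {
           libcamera::Signal<int> signal;
           Receiver receiver;

           signal.connect(&receiver, &Receiver::onValue);
           signal.emit(42);
   }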
View file
@ -10,6 +10,7 @@
#include <array>
#include <iterator>
#include <limits>
#include <stddef.h>
#include <type_traits>
namespace libcamera {
@ -346,7 +347,13 @@ public:
}
constexpr Span(const Span &other) noexcept = default;
constexpr Span &operator=(const Span &other) noexcept = default;
constexpr Span &operator=(const Span &other) noexcept
{
data_ = other.data_;
size_ = other.size_;
return *this;
}
constexpr iterator begin() const { return data(); }
constexpr const_iterator cbegin() const { return begin(); }
View file
@ -13,10 +13,8 @@
#include <libcamera/base/private.h>
#include <libcamera/base/class.h>
#include <libcamera/base/message.h>
#include <libcamera/base/signal.h>
#include <libcamera/base/span.h>
#include <libcamera/base/utils.h>
namespace libcamera {
@ -37,8 +35,6 @@ public:
void exit(int code = 0);
bool wait(utils::duration duration = utils::duration::max());
int setThreadAffinity(const Span<const unsigned int> &cpus);
bool isRunning();
Signal<> finished;
@ -48,21 +44,16 @@ public:
EventDispatcher *eventDispatcher();
void dispatchMessages(Message::Type type = Message::Type::None,
Object *receiver = nullptr);
void dispatchMessages(Message::Type type = Message::Type::None);
protected:
int exec();
virtual void run();
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(Thread)
void startThread();
void finishThread();
void setThreadAffinityInternal();
void postMessage(std::unique_ptr<Message> msg, Object *receiver);
void removeMessages(Object *receiver);
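To show the intended use, a worker derives from ``Thread`` and overrides the
protected ``run()`` entry point; a hedged sketch with a hypothetical
``Worker`` class:

.. code-block:: cpp

   #include <libcamera/base/thread.h>

   class Worker : public libcamera::Thread
   {
   protected:
           void run() override
           {
                   /* Perform work here, or call exec() to run an event loop. */
           }
   };

   void startWorker()
   {
           Worker worker;
           worker.start();
           worker.wait();
   }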
View file
@ -8,6 +8,7 @@
#pragma once
#include <chrono>
#include <stdint.h>
#include <libcamera/base/private.h>

View file
#include <utility>
#include <libcamera/base/class.h>
#include <libcamera/base/compiler.h>
namespace libcamera {
@ -42,7 +43,7 @@ public:
return *this;
}
[[nodiscard]] int release()
__nodiscard int release()
{
int fd = fd_;
fd_ = -1;
View file
@ -9,13 +9,12 @@
#include <algorithm>
#include <chrono>
#include <functional>
#include <iterator>
#include <memory>
#include <ostream>
#include <sstream>
#include <stdint.h>
#include <string.h>
#include <string>
#include <string.h>
#include <sys/time.h>
#include <type_traits>
#include <utility>
@ -91,30 +90,6 @@ template<typename T,
_hex hex(T value, unsigned int width = 0);
#ifndef __DOXYGEN__
template<>
inline _hex hex<int8_t>(int8_t value, unsigned int width)
{
return { static_cast<uint64_t>(value), width ? width : 2 };
}
template<>
inline _hex hex<uint8_t>(uint8_t value, unsigned int width)
{
return { static_cast<uint64_t>(value), width ? width : 2 };
}
template<>
inline _hex hex<int16_t>(int16_t value, unsigned int width)
{
return { static_cast<uint64_t>(value), width ? width : 4 };
}
template<>
inline _hex hex<uint16_t>(uint16_t value, unsigned int width)
{
return { static_cast<uint64_t>(value), width ? width : 4 };
}
template<>
inline _hex hex<int32_t>(int32_t value, unsigned int width)
{
@ -205,16 +180,7 @@ public:
iterator &operator++();
std::string operator*() const;
bool operator==(const iterator &other) const
{
return pos_ == other.pos_;
}
bool operator!=(const iterator &other) const
{
return !(*this == other);
}
bool operator!=(const iterator &other) const;
private:
const StringSplitter *ss_;
@ -222,15 +188,8 @@ public:
std::string::size_type next_;
};
iterator begin() const
{
return { this, 0 };
}
iterator end() const
{
return { this, std::string::npos };
}
iterator begin() const;
iterator end() const;
private:
std::string str_;
@ -416,18 +375,6 @@ constexpr std::underlying_type_t<Enum> to_underlying(Enum e) noexcept
return static_cast<std::underlying_type_t<Enum>>(e);
}
class ScopeExitActions
{
public:
~ScopeExitActions();
void operator+=(std::function<void()> &&action);
void release();
private:
std::vector<std::function<void()>> actions_;
};
} /* namespace utils */
#ifndef __DOXYGEN__
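As a usage note, the ``utils::hex()`` helper declared above wraps an integer
for stream output with a ``0x`` prefix and zero padding; a small fragment:

.. code-block:: cpp

   std::ostringstream os;
   os << libcamera::utils::hex(0x1234, 8);
   /* os.str() is expected to be "0x00001234". */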
View file
@ -9,7 +9,6 @@
#include <memory>
#include <string>
#include <string_view>
#include <sys/types.h>
#include <vector>
@ -32,7 +31,7 @@ public:
void stop();
std::vector<std::shared_ptr<Camera>> cameras() const;
std::shared_ptr<Camera> get(std::string_view id);
std::shared_ptr<Camera> get(const std::string &id);
static const std::string &version() { return version_; }
View file
@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* {{mode|capitalize}} ID list
* Control ID list
*
* This file is auto-generated. Do not edit.
*/
@ -18,44 +18,18 @@
namespace libcamera {
namespace {{mode}} {
namespace controls {
extern const ControlIdMap {{mode}};
{%- for vendor, ctrls in controls -%}
{% if vendor != 'libcamera' %}
namespace {{vendor}} {
#define LIBCAMERA_HAS_{{vendor|upper}}_VENDOR_{{mode|upper}}
{%- endif %}
{% if ctrls %}
enum {
{%- for ctrl in ctrls %}
{{ctrl.name|snake_case|upper}} = {{ctrl.id}},
{%- endfor %}
${ids}
};
{% endif %}
{% for ctrl in ctrls -%}
{% if ctrl.is_enum -%}
enum {{ctrl.name}}Enum {
{%- for enum in ctrl.enum_values %}
{{enum.name}} = {{enum.value}},
{%- endfor %}
};
extern const std::array<const ControlValue, {{ctrl.enum_values_count}}> {{ctrl.name}}Values;
extern const std::map<std::string, {{ctrl.type}}> {{ctrl.name}}NameValueMap;
{% endif -%}
extern const Control<{{ctrl.type}}> {{ctrl.name}};
{% endfor -%}
${controls}
{% if vendor != 'libcamera' %}
} /* namespace {{vendor}} */
{% endif -%}
extern const ControlIdMap controls;
{% endfor %}
} /* namespace {{mode}} */
${vendor_controls}
} /* namespace controls */
} /* namespace libcamera */
View file
@ -8,7 +8,6 @@
#pragma once
#include <assert.h>
#include <map>
#include <optional>
#include <set>
#include <stdint.h>
@ -17,7 +16,6 @@
#include <vector>
#include <libcamera/base/class.h>
#include <libcamera/base/flags.h>
#include <libcamera/base/span.h>
#include <libcamera/geometry.h>
@ -30,102 +28,67 @@ enum ControlType {
ControlTypeNone,
ControlTypeBool,
ControlTypeByte,
ControlTypeUnsigned16,
ControlTypeUnsigned32,
ControlTypeInteger32,
ControlTypeInteger64,
ControlTypeFloat,
ControlTypeString,
ControlTypeRectangle,
ControlTypeSize,
ControlTypePoint,
};
namespace details {
template<typename T, typename = std::void_t<>>
template<typename T>
struct control_type {
};
template<>
struct control_type<void> {
static constexpr ControlType value = ControlTypeNone;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<bool> {
static constexpr ControlType value = ControlTypeBool;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<uint8_t> {
static constexpr ControlType value = ControlTypeByte;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<uint16_t> {
static constexpr ControlType value = ControlTypeUnsigned16;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<uint32_t> {
static constexpr ControlType value = ControlTypeUnsigned32;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<int32_t> {
static constexpr ControlType value = ControlTypeInteger32;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<int64_t> {
static constexpr ControlType value = ControlTypeInteger64;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<float> {
static constexpr ControlType value = ControlTypeFloat;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<std::string> {
static constexpr ControlType value = ControlTypeString;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<Rectangle> {
static constexpr ControlType value = ControlTypeRectangle;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<Size> {
static constexpr ControlType value = ControlTypeSize;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<Point> {
static constexpr ControlType value = ControlTypePoint;
static constexpr std::size_t size = 0;
};
template<typename T, std::size_t N>
struct control_type<Span<T, N>, std::enable_if_t<control_type<std::remove_cv_t<T>>::size == 0>> : public control_type<std::remove_cv_t<T>> {
static constexpr std::size_t size = N;
};
template<typename T>
struct control_type<T, std::enable_if_t<std::is_enum_v<T> && sizeof(T) == sizeof(int32_t)>> : public control_type<int32_t> {
struct control_type<Span<T, N>> : public control_type<std::remove_cv_t<T>> {
};
} /* namespace details */
@ -250,44 +213,23 @@ private:
class ControlId
{
public:
enum class Direction {
In = (1 << 0),
Out = (1 << 1),
};
using DirectionFlags = Flags<Direction>;
ControlId(unsigned int id, const std::string &name, const std::string &vendor,
ControlType type, DirectionFlags direction,
std::size_t size = 0,
const std::map<std::string, int32_t> &enumStrMap = {});
ControlId(unsigned int id, const std::string &name, ControlType type)
: id_(id), name_(name), type_(type)
{
}
unsigned int id() const { return id_; }
const std::string &name() const { return name_; }
const std::string &vendor() const { return vendor_; }
ControlType type() const { return type_; }
DirectionFlags direction() const { return direction_; }
bool isInput() const { return !!(direction_ & Direction::In); }
bool isOutput() const { return !!(direction_ & Direction::Out); }
bool isArray() const { return size_ > 0; }
std::size_t size() const { return size_; }
const std::map<int32_t, std::string> &enumerators() const { return reverseMap_; }
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(ControlId)
unsigned int id_;
std::string name_;
std::string vendor_;
ControlType type_;
DirectionFlags direction_;
std::size_t size_;
std::map<std::string, int32_t> enumStrMap_;
std::map<int32_t, std::string> reverseMap_;
};
LIBCAMERA_FLAGS_ENABLE_OPERATORS(ControlId::Direction)
static inline bool operator==(unsigned int lhs, const ControlId &rhs)
{
return lhs == rhs.id();
@ -314,11 +256,8 @@ class Control : public ControlId
public:
using type = T;
Control(unsigned int id, const char *name, const char *vendor,
ControlId::DirectionFlags direction,
const std::map<std::string, int32_t> &enumStrMap = {})
: ControlId(id, name, vendor, details::control_type<std::remove_cv_t<T>>::value,
direction, details::control_type<std::remove_cv_t<T>>::size, enumStrMap)
Control(unsigned int id, const char *name)
: ControlId(id, name, details::control_type<std::remove_cv_t<T>>::value)
{
}
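
Editor's illustration (not part of the diff): the master-side Control constructor shown above takes an explicit DirectionFlags argument. A minimal sketch of declaring a control against it; the numeric id, control name and vendor string are invented placeholders.

/* Hypothetical control declaration against the master-side API. */
#include <libcamera/controls.h>

using namespace libcamera;

static const Control<int32_t> ExampleGain{
	0x1001, "ExampleGain", "acme",
	ControlId::Direction::In | ControlId::Direction::Out
};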

View file

@ -7,6 +7,7 @@
#pragma once
#include <assert.h>
#include <limits>
#include <memory>
#include <stdint.h>
@ -26,7 +27,6 @@ struct FrameMetadata {
FrameSuccess,
FrameError,
FrameCancelled,
FrameStartup,
};
struct Plane {

View file

@ -11,6 +11,8 @@
#include <ostream>
#include <string>
#include <libcamera/base/compiler.h>
namespace libcamera {
class Rectangle;
@ -108,7 +110,7 @@ public:
return *this;
}
[[nodiscard]] constexpr Size alignedDownTo(unsigned int hAlignment,
__nodiscard constexpr Size alignedDownTo(unsigned int hAlignment,
unsigned int vAlignment) const
{
return {
@ -117,7 +119,7 @@ public:
};
}
[[nodiscard]] constexpr Size alignedUpTo(unsigned int hAlignment,
__nodiscard constexpr Size alignedUpTo(unsigned int hAlignment,
unsigned int vAlignment) const
{
return {
@ -126,7 +128,7 @@ public:
};
}
[[nodiscard]] constexpr Size boundedTo(const Size &bound) const
__nodiscard constexpr Size boundedTo(const Size &bound) const
{
return {
std::min(width, bound.width),
@ -134,7 +136,7 @@ public:
};
}
[[nodiscard]] constexpr Size expandedTo(const Size &expand) const
__nodiscard constexpr Size expandedTo(const Size &expand) const
{
return {
std::max(width, expand.width),
@ -142,7 +144,7 @@ public:
};
}
[[nodiscard]] constexpr Size grownBy(const Size &margins) const
__nodiscard constexpr Size grownBy(const Size &margins) const
{
return {
width + margins.width,
@ -150,7 +152,7 @@ public:
};
}
[[nodiscard]] constexpr Size shrunkBy(const Size &margins) const
__nodiscard constexpr Size shrunkBy(const Size &margins) const
{
return {
width > margins.width ? width - margins.width : 0,
@ -158,10 +160,10 @@ public:
};
}
[[nodiscard]] Size boundedToAspectRatio(const Size &ratio) const;
[[nodiscard]] Size expandedToAspectRatio(const Size &ratio) const;
__nodiscard Size boundedToAspectRatio(const Size &ratio) const;
__nodiscard Size expandedToAspectRatio(const Size &ratio) const;
[[nodiscard]] Rectangle centeredTo(const Point &center) const;
__nodiscard Rectangle centeredTo(const Point &center) const;
Size operator*(float factor) const;
Size operator/(float factor) const;
@ -260,15 +262,6 @@ public:
{
}
constexpr Rectangle(const Point &point1, const Point &point2)
: Rectangle(std::min(point1.x, point2.x), std::min(point1.y, point2.y),
static_cast<unsigned int>(std::max(point1.x, point2.x)) -
static_cast<unsigned int>(std::min(point1.x, point2.x)),
static_cast<unsigned int>(std::max(point1.y, point2.y)) -
static_cast<unsigned int>(std::min(point1.y, point2.y)))
{
}
int x;
int y;
unsigned int width;
@ -292,14 +285,11 @@ public:
Rectangle &scaleBy(const Size &numerator, const Size &denominator);
Rectangle &translateBy(const Point &point);
[[nodiscard]] Rectangle boundedTo(const Rectangle &bound) const;
[[nodiscard]] Rectangle enclosedIn(const Rectangle &boundary) const;
[[nodiscard]] Rectangle scaledBy(const Size &numerator,
__nodiscard Rectangle boundedTo(const Rectangle &bound) const;
__nodiscard Rectangle enclosedIn(const Rectangle &boundary) const;
__nodiscard Rectangle scaledBy(const Size &numerator,
const Size &denominator) const;
[[nodiscard]] Rectangle translatedBy(const Point &point) const;
Rectangle transformedBetween(const Rectangle &source,
const Rectangle &target) const;
__nodiscard Rectangle translatedBy(const Point &point) const;
};
bool operator==(const Rectangle &lhs, const Rectangle &rhs);
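
Editor's illustration: the two-point Rectangle constructor and the [[nodiscard]] alignment helpers above are the master-side API. A short sketch with arbitrary values:

#include <libcamera/geometry.h>

using namespace libcamera;

void geometryExample()
{
	/* Corner order does not matter; the rectangle is normalised. */
	Rectangle bbox(Point{ 100, 50 }, Point{ 20, 200 });
	/* bbox == (20, 50)/80x150 */

	/* Each dimension is rounded up to the requested multiple. */
	Size aligned = Size{ 1918, 1078 }.alignedUpTo(16, 2);
	/* aligned == 1920x1078 */

	(void)bbox;
	(void)aligned;
}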

View file

@ -11,7 +11,6 @@
#include <list>
#include <memory>
#include <set>
#include <stdint.h>
#include <string>
#include <libcamera/base/class.h>
@ -33,7 +32,6 @@ public:
~Private();
PipelineHandler *pipe() { return pipe_.get(); }
const PipelineHandler *pipe() const { return pipe_.get(); }
std::list<Request *> queuedRequests_;
ControlInfoMap controlInfo_;

View file

@ -7,7 +7,6 @@
#pragma once
#include <memory>
#include <stdint.h>
#include <string>
#include <libcamera/base/class.h>

View file

@ -9,6 +9,7 @@
#include <libcamera/camera_manager.h>
#include <map>
#include <memory>
#include <sys/types.h>
#include <vector>
@ -18,14 +19,13 @@
#include <libcamera/base/thread.h>
#include <libcamera/base/thread_annotations.h>
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/process.h"
namespace libcamera {
class Camera;
class DeviceEnumerator;
class IPAManager;
class PipelineHandlerFactoryBase;
class CameraManager::Private : public Extensible::Private, public Thread
{
@ -38,8 +38,6 @@ public:
void addCamera(std::shared_ptr<Camera> camera) LIBCAMERA_TSA_EXCLUDES(mutex_);
void removeCamera(std::shared_ptr<Camera> camera) LIBCAMERA_TSA_EXCLUDES(mutex_);
IPAManager *ipaManager() const { return ipaManager_.get(); }
protected:
void run() override;
@ -64,7 +62,7 @@ private:
std::unique_ptr<DeviceEnumerator> enumerator_;
std::unique_ptr<IPAManager> ipaManager_;
IPAManager ipaManager_;
ProcessManager processManager_;
};

View file

@ -8,12 +8,11 @@
#pragma once
#include <memory>
#include <stdint.h>
#include <string>
#include <variant>
#include <vector>
#include <libcamera/base/class.h>
#include <libcamera/base/log.h>
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
@ -21,8 +20,10 @@
#include <libcamera/orientation.h>
#include <libcamera/transform.h>
#include <libcamera/ipa/core_ipa_interface.h>
#include "libcamera/internal/bayer_format.h"
#include "libcamera/internal/camera_sensor_properties.h"
#include "libcamera/internal/formats.h"
#include "libcamera/internal/v4l2_subdevice.h"
namespace libcamera {
@ -31,101 +32,95 @@ class CameraLens;
class MediaEntity;
class SensorConfiguration;
struct CameraSensorProperties;
enum class Orientation;
struct IPACameraSensorInfo;
class CameraSensor
class CameraSensor : protected Loggable
{
public:
virtual ~CameraSensor();
explicit CameraSensor(const MediaEntity *entity);
~CameraSensor();
virtual const std::string &model() const = 0;
virtual const std::string &id() const = 0;
int init();
virtual const MediaEntity *entity() const = 0;
virtual V4L2Subdevice *device() = 0;
const std::string &model() const { return model_; }
const std::string &id() const { return id_; }
virtual CameraLens *focusLens() = 0;
const MediaEntity *entity() const { return entity_; }
V4L2Subdevice *device() { return subdev_.get(); }
virtual const std::vector<unsigned int> &mbusCodes() const = 0;
virtual std::vector<Size> sizes(unsigned int mbusCode) const = 0;
virtual Size resolution() const = 0;
CameraLens *focusLens() { return focusLens_.get(); }
virtual V4L2SubdeviceFormat
getFormat(const std::vector<unsigned int> &mbusCodes,
const Size &size, const Size maxSize = Size()) const = 0;
virtual int setFormat(V4L2SubdeviceFormat *format,
Transform transform = Transform::Identity) = 0;
virtual int tryFormat(V4L2SubdeviceFormat *format) const = 0;
const std::vector<unsigned int> &mbusCodes() const { return mbusCodes_; }
std::vector<Size> sizes(unsigned int mbusCode) const;
Size resolution() const;
virtual int applyConfiguration(const SensorConfiguration &config,
V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
const Size &size) const;
int setFormat(V4L2SubdeviceFormat *format,
Transform transform = Transform::Identity);
int tryFormat(V4L2SubdeviceFormat *format) const;
int applyConfiguration(const SensorConfiguration &config,
Transform transform = Transform::Identity,
V4L2SubdeviceFormat *sensorFormat = nullptr) = 0;
V4L2SubdeviceFormat *sensorFormat = nullptr);
virtual V4L2Subdevice::Stream imageStream() const;
virtual std::optional<V4L2Subdevice::Stream> embeddedDataStream() const;
virtual V4L2SubdeviceFormat embeddedDataFormat() const;
virtual int setEmbeddedDataEnabled(bool enable);
const ControlList &properties() const { return properties_; }
int sensorInfo(IPACameraSensorInfo *info) const;
Transform computeTransform(Orientation *orientation) const;
BayerFormat::Order bayerOrder(Transform t) const;
virtual const ControlList &properties() const = 0;
virtual int sensorInfo(IPACameraSensorInfo *info) const = 0;
virtual Transform computeTransform(Orientation *orientation) const = 0;
virtual BayerFormat::Order bayerOrder(Transform t) const = 0;
const ControlInfoMap &controls() const;
ControlList getControls(const std::vector<uint32_t> &ids);
int setControls(ControlList *ctrls);
virtual const ControlInfoMap &controls() const = 0;
virtual ControlList getControls(const std::vector<uint32_t> &ids) = 0;
virtual int setControls(ControlList *ctrls) = 0;
const std::vector<controls::draft::TestPatternModeEnum> &testPatternModes() const
{
return testPatternModes_;
}
int setTestPatternMode(controls::draft::TestPatternModeEnum mode);
virtual const std::vector<controls::draft::TestPatternModeEnum> &
testPatternModes() const = 0;
virtual int setTestPatternMode(controls::draft::TestPatternModeEnum mode) = 0;
virtual const CameraSensorProperties::SensorDelays &sensorDelays() = 0;
};
class CameraSensorFactoryBase
{
public:
CameraSensorFactoryBase(const char *name, int priority);
virtual ~CameraSensorFactoryBase() = default;
static std::unique_ptr<CameraSensor> create(MediaEntity *entity);
const std::string &name() const { return name_; }
int priority() const { return priority_; }
protected:
std::string logPrefix() const override;
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorFactoryBase)
LIBCAMERA_DISABLE_COPY(CameraSensor)
static std::vector<CameraSensorFactoryBase *> &factories();
int generateId();
int validateSensorDriver();
void initVimcDefaultProperties();
void initStaticProperties();
void initTestPatternModes();
int initProperties();
int discoverAncillaryDevices();
int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
static void registerFactory(CameraSensorFactoryBase *factory);
const MediaEntity *entity_;
std::unique_ptr<V4L2Subdevice> subdev_;
unsigned int pad_;
virtual std::variant<std::unique_ptr<CameraSensor>, int>
match(MediaEntity *entity) const = 0;
const CameraSensorProperties *staticProps_;
std::string name_;
int priority_;
std::string model_;
std::string id_;
V4L2Subdevice::Formats formats_;
std::vector<unsigned int> mbusCodes_;
std::vector<Size> sizes_;
std::vector<controls::draft::TestPatternModeEnum> testPatternModes_;
controls::draft::TestPatternModeEnum testPatternMode_;
Size pixelArraySize_;
Rectangle activeArea_;
const BayerFormat *bayerFormat_;
bool supportFlips_;
bool flipsAlterBayerOrder_;
Orientation mountingOrientation_;
ControlList properties_;
std::unique_ptr<CameraLens> focusLens_;
};
template<typename _CameraSensor>
class CameraSensorFactory final : public CameraSensorFactoryBase
{
public:
CameraSensorFactory(const char *name, int priority)
: CameraSensorFactoryBase(name, priority)
{
}
private:
std::variant<std::unique_ptr<CameraSensor>, int>
match(MediaEntity *entity) const override
{
return _CameraSensor::match(entity);
}
};
#define REGISTER_CAMERA_SENSOR(sensor, priority) \
static CameraSensorFactory<sensor> global_##sensor##Factory{ #sensor, priority };
} /* namespace libcamera */
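
Editor's illustration: on master, CameraSensor is abstract and concrete sensor handlers register themselves with REGISTER_CAMERA_SENSOR(). A sketch of the consuming side, which obtains a sensor through the factory:

#include <memory>

#include "libcamera/internal/camera_sensor.h"

/* Sketch: how a pipeline handler would obtain a sensor instance. */
std::unique_ptr<libcamera::CameraSensor>
makeSensor(libcamera::MediaEntity *entity)
{
	/*
	 * Iterates the factories registered with REGISTER_CAMERA_SENSOR()
	 * in priority order and returns the first successful match().
	 */
	return libcamera::CameraSensorFactoryBase::create(entity);
}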

View file

@ -8,7 +8,6 @@
#pragma once
#include <map>
#include <stdint.h>
#include <string>
#include <libcamera/control_ids.h>
@ -17,18 +16,10 @@
namespace libcamera {
struct CameraSensorProperties {
struct SensorDelays {
uint8_t exposureDelay;
uint8_t gainDelay;
uint8_t vblankDelay;
uint8_t hblankDelay;
};
static const CameraSensorProperties *get(const std::string &sensor);
Size unitCellSize;
std::map<controls::draft::TestPatternModeEnum, int32_t> testPatternModes;
SensorDelays sensorDelays;
};
} /* namespace libcamera */
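
Editor's illustration of the SensorDelays field added on master, assuming plain aggregate initialisation; all numbers are placeholders, not values for any real sensor:

#include "libcamera/internal/camera_sensor_properties.h"

using namespace libcamera;

static const CameraSensorProperties exampleProps{
	Size{ 1120, 1120 },	/* unitCellSize, placeholder */
	{},			/* testPatternModes */
	{ 2, 1, 2, 2 },		/* exposure/gain/vblank/hblank delays, in frames */
};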

View file

@ -1,68 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Raspberry Pi Ltd
*
* Clock recovery algorithm
*/
#pragma once
#include <stdint.h>
namespace libcamera {
class ClockRecovery
{
public:
ClockRecovery();
void configure(unsigned int numSamples = 100, unsigned int maxJitter = 2000,
unsigned int minSamples = 10, unsigned int errorThreshold = 50000);
void reset();
void addSample();
void addSample(uint64_t input, uint64_t output);
uint64_t getOutput(uint64_t input);
private:
/* Approximate number of samples over which the model state persists. */
unsigned int numSamples_;
/* Remove any output jitter larger than this immediately. */
unsigned int maxJitter_;
/* Number of samples required before we start to use model estimates. */
unsigned int minSamples_;
/* Threshold above which we assume the wallclock has been reset. */
unsigned int errorThreshold_;
/* How many samples seen (up to numSamples_). */
unsigned int count_;
/* This gets subtracted from all input values, just to make the numbers easier. */
uint64_t inputBase_;
/* As above, for the output. */
uint64_t outputBase_;
/* The previous input sample. */
uint64_t lastInput_;
/* The previous output sample. */
uint64_t lastOutput_;
/* Average x value seen so far. */
double xAve_;
/* Average y value seen so far. */
double yAve_;
/* Average x^2 value seen so far. */
double x2Ave_;
/* Average x*y value seen so far. */
double xyAve_;
/*
* The latest estimate of linear parameters to derive the output clock
* from the input.
*/
double slope_;
double offset_;
/* Use this cumulative error to monitor for spontaneous clock updates. */
double error_;
};
} /* namespace libcamera */
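
Editor's illustration: the removed header above fits a linear model between two clocks. Typical use, with invented timestamp values in microseconds:

#include <stdint.h>

#include "libcamera/internal/clock_recovery.h"

using namespace libcamera;

void clockExample()
{
	ClockRecovery wallClock;
	wallClock.configure();

	/* Feed (input, output) timestamp pairs as they arrive... */
	wallClock.addSample(1000000, 1700000000000000);
	wallClock.addSample(1033333, 1700000000033340);

	/* ...then map further input timestamps onto the output clock. */
	uint64_t wall = wallClock.getOutput(1066666);
	(void)wall;
}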

View file

@ -14,11 +14,9 @@
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <libcamera/base/class.h>
#include <libcamera/base/flags.h>
#include <libcamera/base/signal.h>
#include <libcamera/geometry.h>
@ -28,25 +26,12 @@ namespace libcamera {
class FrameBuffer;
class MediaDevice;
class PixelFormat;
class Stream;
struct StreamConfiguration;
class Converter
{
public:
enum class Feature {
None = 0,
InputCrop = (1 << 0),
};
using Features = Flags<Feature>;
enum class Alignment {
Down = 0,
Up,
};
Converter(MediaDevice *media, Features features = Feature::None);
Converter(MediaDevice *media);
virtual ~Converter();
virtual int loadConfiguration(const std::string &filename) = 0;
@ -56,45 +41,25 @@ public:
virtual std::vector<PixelFormat> formats(PixelFormat input) = 0;
virtual SizeRange sizes(const Size &input) = 0;
virtual Size adjustInputSize(const PixelFormat &pixFmt,
const Size &size,
Alignment align = Alignment::Down) = 0;
virtual Size adjustOutputSize(const PixelFormat &pixFmt,
const Size &size,
Alignment align = Alignment::Down) = 0;
virtual std::tuple<unsigned int, unsigned int>
strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size) = 0;
virtual int validateOutput(StreamConfiguration *cfg, bool *adjusted,
Alignment align = Alignment::Down) = 0;
virtual int configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0;
virtual bool isConfigured(const Stream *stream) const = 0;
virtual int exportBuffers(const Stream *stream, unsigned int count,
virtual int exportBuffers(unsigned int output, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) = 0;
virtual int start() = 0;
virtual void stop() = 0;
virtual int queueBuffers(FrameBuffer *input,
const std::map<const Stream *, FrameBuffer *> &outputs) = 0;
virtual int setInputCrop(const Stream *stream, Rectangle *rect) = 0;
virtual std::pair<Rectangle, Rectangle> inputCropBounds() = 0;
virtual std::pair<Rectangle, Rectangle> inputCropBounds(const Stream *stream) = 0;
const std::map<unsigned int, FrameBuffer *> &outputs) = 0;
Signal<FrameBuffer *> inputBufferReady;
Signal<FrameBuffer *> outputBufferReady;
const std::string &deviceNode() const { return deviceNode_; }
Features features() const { return features_; }
protected:
Features features_;
private:
std::string deviceNode_;
};
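
Editor's illustration: a caller would gate crop configuration on the master-side Feature::InputCrop flag. A sketch, assuming `converter` and `stream` are set up elsewhere and using an arbitrary crop rectangle:

#include "libcamera/internal/converter.h"

using namespace libcamera;

void applyCrop(Converter *converter, const Stream *stream)
{
	if (!!(converter->features() & Converter::Feature::InputCrop)) {
		Rectangle crop(0, 0, 640, 480);
		converter->setInputCrop(stream, &crop);
	}
}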

View file

@ -28,9 +28,7 @@ class FrameBuffer;
class MediaDevice;
class Size;
class SizeRange;
class Stream;
struct StreamConfiguration;
class Rectangle;
class V4L2M2MDevice;
class V4L2M2MConverter : public Converter
@ -38,45 +36,31 @@ class V4L2M2MConverter : public Converter
public:
V4L2M2MConverter(MediaDevice *media);
int loadConfiguration([[maybe_unused]] const std::string &filename) override { return 0; }
bool isValid() const override { return m2m_ != nullptr; }
int loadConfiguration([[maybe_unused]] const std::string &filename) { return 0; }
bool isValid() const { return m2m_ != nullptr; }
std::vector<PixelFormat> formats(PixelFormat input) override;
SizeRange sizes(const Size &input) override;
std::vector<PixelFormat> formats(PixelFormat input);
SizeRange sizes(const Size &input);
std::tuple<unsigned int, unsigned int>
strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size) override;
Size adjustInputSize(const PixelFormat &pixFmt,
const Size &size, Alignment align = Alignment::Down) override;
Size adjustOutputSize(const PixelFormat &pixFmt,
const Size &size, Alignment align = Alignment::Down) override;
strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size);
int configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>>
&outputCfg) override;
bool isConfigured(const Stream *stream) const override;
int exportBuffers(const Stream *stream, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfg);
int exportBuffers(unsigned int output, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers);
int start() override;
void stop() override;
int validateOutput(StreamConfiguration *cfg, bool *adjusted,
Alignment align = Alignment::Down) override;
int start();
void stop();
int queueBuffers(FrameBuffer *input,
const std::map<const Stream *, FrameBuffer *> &outputs) override;
int setInputCrop(const Stream *stream, Rectangle *rect) override;
std::pair<Rectangle, Rectangle> inputCropBounds() override { return inputCropBounds_; }
std::pair<Rectangle, Rectangle> inputCropBounds(const Stream *stream) override;
const std::map<unsigned int, FrameBuffer *> &outputs);
private:
class V4L2M2MStream : protected Loggable
class Stream : protected Loggable
{
public:
V4L2M2MStream(V4L2M2MConverter *converter, const Stream *stream);
Stream(V4L2M2MConverter *converter, unsigned int index);
bool isValid() const { return m2m_ != nullptr; }
@ -90,11 +74,6 @@ private:
int queueBuffers(FrameBuffer *input, FrameBuffer *output);
int setInputSelection(unsigned int target, Rectangle *rect);
int getInputSelection(unsigned int target, Rectangle *rect);
std::pair<Rectangle, Rectangle> inputCropBounds();
protected:
std::string logPrefix() const override;
@ -103,23 +82,17 @@ private:
void outputBufferReady(FrameBuffer *buffer);
V4L2M2MConverter *converter_;
const Stream *stream_;
unsigned int index_;
std::unique_ptr<V4L2M2MDevice> m2m_;
unsigned int inputBufferCount_;
unsigned int outputBufferCount_;
std::pair<Rectangle, Rectangle> inputCropBounds_;
};
Size adjustSizes(const Size &size, const std::vector<SizeRange> &ranges,
Alignment align);
std::unique_ptr<V4L2M2MDevice> m2m_;
std::map<const Stream *, std::unique_ptr<V4L2M2MStream>> streams_;
std::vector<Stream> streams_;
std::map<FrameBuffer *, unsigned int> queue_;
std::pair<Rectangle, Rectangle> inputCropBounds_;
};
} /* namespace libcamera */

View file

@ -1,46 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Google Inc.
*
* Debug metadata helpers
*/
#pragma once
#include <libcamera/control_ids.h>
namespace libcamera {
class DebugMetadata
{
public:
DebugMetadata() = default;
void enableByControl(const ControlList &controls);
void enable(bool enable = true);
void setParent(DebugMetadata *parent);
void moveEntries(ControlList &list);
template<typename T, typename V>
void set(const Control<T> &ctrl, const V &value)
{
if (parent_) {
parent_->set(ctrl, value);
return;
}
if (!enabled_)
return;
cache_.set(ctrl, value);
}
void set(unsigned int id, const ControlValue &value);
private:
bool enabled_ = false;
DebugMetadata *parent_ = nullptr;
ControlList cache_;
};
} /* namespace libcamera */
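
Editor's illustration of the removed DebugMetadata helper: an IPA caches values, and they are later drained into request metadata. `debugCtrl` stands in for an actual debug metadata control:

#include <libcamera/controls.h>

#include "libcamera/internal/debug_metadata.h"

using namespace libcamera;

void debugExample(DebugMetadata &meta, const Control<float> &debugCtrl,
		  ControlList &requestMetadata)
{
	meta.enable();
	meta.set(debugCtrl, 1.0f);

	/* At request completion, drain the cached entries into metadata. */
	meta.moveEntries(requestMetadata);
}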

View file

@ -10,15 +10,13 @@
#include <stdint.h>
#include <unordered_map>
#include <libcamera/base/object.h>
#include <libcamera/controls.h>
namespace libcamera {
class V4L2Device;
class DelayedControls : public Object
class DelayedControls
{
public:
struct ControlParams {

View file

@ -7,6 +7,7 @@
#pragma once
#include <memory>
#include <string>
#include "libcamera/internal/device_enumerator.h"

View file

@ -1,80 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
* Helper class for dma-buf allocations.
*/
#pragma once
#include <memory>
#include <stdint.h>
#include <string>
#include <vector>
#include <libcamera/base/flags.h>
#include <libcamera/base/shared_fd.h>
#include <libcamera/base/unique_fd.h>
namespace libcamera {
class FrameBuffer;
class DmaBufAllocator
{
public:
enum class DmaBufAllocatorFlag {
CmaHeap = 1 << 0,
SystemHeap = 1 << 1,
UDmaBuf = 1 << 2,
};
using DmaBufAllocatorFlags = Flags<DmaBufAllocatorFlag>;
DmaBufAllocator(DmaBufAllocatorFlags flags = DmaBufAllocatorFlag::CmaHeap);
~DmaBufAllocator();
bool isValid() const { return providerHandle_.isValid(); }
UniqueFD alloc(const char *name, std::size_t size);
int exportBuffers(unsigned int count,
const std::vector<unsigned int> &planeSizes,
std::vector<std::unique_ptr<FrameBuffer>> *buffers);
private:
std::unique_ptr<FrameBuffer> createBuffer(
std::string name, const std::vector<unsigned int> &planeSizes);
UniqueFD allocFromHeap(const char *name, std::size_t size);
UniqueFD allocFromUDmaBuf(const char *name, std::size_t size);
UniqueFD providerHandle_;
DmaBufAllocatorFlag type_;
};
class DmaSyncer final
{
public:
enum class SyncType {
Read = 0,
Write,
ReadWrite,
};
explicit DmaSyncer(SharedFD fd, SyncType type = SyncType::ReadWrite);
DmaSyncer(DmaSyncer &&other) = default;
DmaSyncer &operator=(DmaSyncer &&other) = default;
~DmaSyncer();
private:
LIBCAMERA_DISABLE_COPY(DmaSyncer)
void sync(uint64_t step);
SharedFD fd_;
uint64_t flags_ = 0;
};
LIBCAMERA_FLAGS_ENABLE_OPERATORS(DmaBufAllocator::DmaBufAllocatorFlag)
} /* namespace libcamera */
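
Editor's illustration of the master-side allocator and its DmaSyncer companion; the buffer name and size are arbitrary:

#include <utility>

#include <libcamera/base/shared_fd.h>
#include <libcamera/base/unique_fd.h>

#include "libcamera/internal/dma_buf_allocator.h"

using namespace libcamera;

void allocExample()
{
	/* Allow allocation from either the CMA heap or udmabuf. */
	DmaBufAllocator allocator(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap |
				  DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf);
	if (!allocator.isValid())
		return;

	UniqueFD fd = allocator.alloc("example-buffer", 4096);

	/* Bracket CPU access to the mapped buffer with a DmaSyncer. */
	DmaSyncer sync(SharedFD(std::move(fd)), DmaSyncer::SyncType::Read);
}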

View file

@ -0,0 +1,38 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
* Helper class for dma-heap allocations.
*/
#pragma once
#include <stddef.h>
#include <libcamera/base/flags.h>
#include <libcamera/base/unique_fd.h>
namespace libcamera {
class DmaHeap
{
public:
enum class DmaHeapFlag {
Cma = 1 << 0,
System = 1 << 1,
};
using DmaHeapFlags = Flags<DmaHeapFlag>;
DmaHeap(DmaHeapFlags flags = DmaHeapFlag::Cma);
~DmaHeap();
bool isValid() const { return dmaHeapHandle_.isValid(); }
UniqueFD alloc(const char *name, std::size_t size);
private:
UniqueFD dmaHeapHandle_;
};
LIBCAMERA_FLAGS_ENABLE_OPERATORS(DmaHeap::DmaHeapFlag)
} /* namespace libcamera */

View file

@ -8,6 +8,7 @@
#pragma once
#include <array>
#include <map>
#include <vector>
#include <libcamera/geometry.h>

View file

@ -8,7 +8,6 @@
#pragma once
#include <memory>
#include <stdint.h>
#include <utility>
#include <libcamera/base/class.h>

View file

@ -7,7 +7,8 @@
#pragma once
#include <stdint.h>
#include <deque>
#include <iostream>
#include <string.h>
#include <tuple>
#include <type_traits>
@ -19,9 +20,10 @@
#include <libcamera/control_ids.h>
#include <libcamera/framebuffer.h>
#include <libcamera/geometry.h>
#include <libcamera/ipa/ipa_interface.h>
#include "libcamera/internal/byte_stream_buffer.h"
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/control_serializer.h"
namespace libcamera {
@ -309,6 +311,7 @@ public:
serialize(const Flags<E> &data, [[maybe_unused]] ControlSerializer *cs = nullptr)
{
std::vector<uint8_t> dataVec;
dataVec.reserve(sizeof(Flags<E>));
appendPOD<uint32_t>(dataVec, static_cast<typename Flags<E>::Type>(data));
return { dataVec, {} };

View file

@ -7,7 +7,6 @@
#pragma once
#include <memory>
#include <stdint.h>
#include <vector>
@ -16,7 +15,6 @@
#include <libcamera/ipa/ipa_interface.h>
#include <libcamera/ipa/ipa_module_info.h>
#include "libcamera/internal/camera_manager.h"
#include "libcamera/internal/ipa_module.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/pub_key.h"
@ -36,13 +34,11 @@ public:
uint32_t minVersion,
uint32_t maxVersion)
{
CameraManager *cm = pipe->cameraManager();
IPAManager *self = cm->_d()->ipaManager();
IPAModule *m = self->module(pipe, minVersion, maxVersion);
IPAModule *m = self_->module(pipe, minVersion, maxVersion);
if (!m)
return nullptr;
std::unique_ptr<T> proxy = std::make_unique<T>(m, !self->isSignatureValid(m));
std::unique_ptr<T> proxy = std::make_unique<T>(m, !self_->isSignatureValid(m));
if (!proxy->isValid()) {
LOG(IPAManager, Error) << "Failed to load proxy";
return nullptr;
@ -59,6 +55,8 @@ public:
#endif
private:
static IPAManager *self_;
void parseDir(const char *libDir, unsigned int maxDepth,
std::vector<std::string> &files);
unsigned int addDir(const char *libDir, unsigned int maxDepth = 0);
@ -68,7 +66,7 @@ private:
bool isSignatureValid(IPAModule *ipa) const;
std::vector<std::unique_ptr<IPAModule>> modules_;
std::vector<IPAModule *> modules_;
#if HAVE_IPA_PUBKEY
static const uint8_t publicKeyData_[];

View file

@ -29,7 +29,7 @@ public:
bool isValid() const;
const struct IPAModuleInfo &info() const;
const std::vector<uint8_t> &signature() const;
const std::vector<uint8_t> signature() const;
const std::string &path() const;
bool load();

View file

@ -7,7 +7,9 @@
#pragma once
#include <memory>
#include <string>
#include <vector>
#include <libcamera/ipa/ipa_interface.h>
@ -29,8 +31,7 @@ public:
bool isValid() const { return valid_; }
std::string configurationFile(const std::string &name,
const std::string &fallbackName = std::string()) const;
std::string configurationFile(const std::string &file) const;
protected:
std::string resolvePath(const std::string &file) const;

View file

@ -7,7 +7,6 @@
#pragma once
#include <stdint.h>
#include <vector>
#include <libcamera/base/shared_fd.h>

View file

@ -9,7 +9,7 @@
#include <map>
#include <memory>
#include <stdint.h>
#include <vector>
#include "libcamera/internal/ipc_pipe.h"
#include "libcamera/internal/ipc_unixsocket.h"

View file

@ -1,226 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
*
* Matrix and related operations
*/
#pragma once
#include <algorithm>
#include <sstream>
#include <type_traits>
#include <vector>
#include <libcamera/base/log.h>
#include <libcamera/base/span.h>
#include "libcamera/internal/yaml_parser.h"
namespace libcamera {
LOG_DECLARE_CATEGORY(Matrix)
#ifndef __DOXYGEN__
template<typename T>
bool matrixInvert(Span<const T> dataIn, Span<T> dataOut, unsigned int dim,
Span<T> scratchBuffer, Span<unsigned int> swapBuffer);
#endif /* __DOXYGEN__ */
template<typename T, unsigned int Rows, unsigned int Cols>
class Matrix
{
static_assert(std::is_arithmetic_v<T>, "Matrix type must be arithmetic");
public:
constexpr Matrix()
{
}
Matrix(const std::array<T, Rows * Cols> &data)
{
std::copy(data.begin(), data.end(), data_.begin());
}
Matrix(const Span<const T, Rows * Cols> data)
{
std::copy(data.begin(), data.end(), data_.begin());
}
static constexpr Matrix identity()
{
Matrix ret;
for (size_t i = 0; i < std::min(Rows, Cols); i++)
ret[i][i] = static_cast<T>(1);
return ret;
}
~Matrix() = default;
const std::string toString() const
{
std::stringstream out;
out << "Matrix { ";
for (unsigned int i = 0; i < Rows; i++) {
out << "[ ";
for (unsigned int j = 0; j < Cols; j++) {
out << (*this)[i][j];
out << ((j + 1 < Cols) ? ", " : " ");
}
out << ((i + 1 < Rows) ? "], " : "]");
}
out << " }";
return out.str();
}
constexpr Span<const T, Rows * Cols> data() const { return data_; }
constexpr Span<const T, Cols> operator[](size_t i) const
{
return Span<const T, Cols>{ &data_.data()[i * Cols], Cols };
}
constexpr Span<T, Cols> operator[](size_t i)
{
return Span<T, Cols>{ &data_.data()[i * Cols], Cols };
}
#ifndef __DOXYGEN__
template<typename U, std::enable_if_t<std::is_arithmetic_v<U>>>
#else
template<typename U>
#endif /* __DOXYGEN__ */
Matrix<T, Rows, Cols> &operator*=(U d)
{
for (unsigned int i = 0; i < Rows * Cols; i++)
data_[i] *= d;
return *this;
}
Matrix<T, Rows, Cols> inverse(bool *ok = nullptr) const
{
static_assert(Rows == Cols, "Matrix must be square");
Matrix<T, Rows, Cols> inverse;
std::array<T, Rows * Cols * 2> scratchBuffer;
std::array<unsigned int, Rows> swapBuffer;
bool res = matrixInvert(Span<const T>(data_),
Span<T>(inverse.data_),
Rows,
Span<T>(scratchBuffer),
Span<unsigned int>(swapBuffer));
if (ok)
*ok = res;
return inverse;
}
private:
/*
* \todo The initializer is only necessary for the constructor to be
* constexpr in C++17. Remove the initializer as soon as we are on
* C++20.
*/
std::array<T, Rows * Cols> data_ = {};
};
#ifndef __DOXYGEN__
template<typename T, typename U, unsigned int Rows, unsigned int Cols,
std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
#else
template<typename T, typename U, unsigned int Rows, unsigned int Cols>
#endif /* __DOXYGEN__ */
Matrix<U, Rows, Cols> operator*(T d, const Matrix<U, Rows, Cols> &m)
{
Matrix<U, Rows, Cols> result;
for (unsigned int i = 0; i < Rows; i++) {
for (unsigned int j = 0; j < Cols; j++)
result[i][j] = d * m[i][j];
}
return result;
}
#ifndef __DOXYGEN__
template<typename T, typename U, unsigned int Rows, unsigned int Cols,
std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
#else
template<typename T, typename U, unsigned int Rows, unsigned int Cols>
#endif /* __DOXYGEN__ */
Matrix<U, Rows, Cols> operator*(const Matrix<U, Rows, Cols> &m, T d)
{
return d * m;
}
template<typename T1, unsigned int R1, unsigned int C1, typename T2, unsigned int R2, unsigned int C2>
constexpr Matrix<std::common_type_t<T1, T2>, R1, C2> operator*(const Matrix<T1, R1, C1> &m1,
const Matrix<T2, R2, C2> &m2)
{
static_assert(C1 == R2, "Matrix dimensions must match for multiplication");
Matrix<std::common_type_t<T1, T2>, R1, C2> result;
for (unsigned int i = 0; i < R1; i++) {
for (unsigned int j = 0; j < C2; j++) {
std::common_type_t<T1, T2> sum = 0;
for (unsigned int k = 0; k < C1; k++)
sum += m1[i][k] * m2[k][j];
result[i][j] = sum;
}
}
return result;
}
template<typename T, unsigned int Rows, unsigned int Cols>
constexpr Matrix<T, Rows, Cols> operator+(const Matrix<T, Rows, Cols> &m1, const Matrix<T, Rows, Cols> &m2)
{
Matrix<T, Rows, Cols> result;
for (unsigned int i = 0; i < Rows; i++) {
for (unsigned int j = 0; j < Cols; j++)
result[i][j] = m1[i][j] + m2[i][j];
}
return result;
}
#ifndef __DOXYGEN__
bool matrixValidateYaml(const YamlObject &obj, unsigned int size);
#endif /* __DOXYGEN__ */
#ifndef __DOXYGEN__
template<typename T, unsigned int Rows, unsigned int Cols>
std::ostream &operator<<(std::ostream &out, const Matrix<T, Rows, Cols> &m)
{
out << m.toString();
return out;
}
template<typename T, unsigned int Rows, unsigned int Cols>
struct YamlObject::Getter<Matrix<T, Rows, Cols>> {
std::optional<Matrix<T, Rows, Cols>> get(const YamlObject &obj) const
{
if (!matrixValidateYaml(obj, Rows * Cols))
return std::nullopt;
Matrix<T, Rows, Cols> matrix;
T *data = &matrix[0][0];
unsigned int i = 0;
for (const YamlObject &entry : obj.asList()) {
const auto value = entry.get<T>();
if (!value)
return std::nullopt;
data[i++] = *value;
}
return matrix;
}
};
#endif /* __DOXYGEN__ */
} /* namespace libcamera */
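
Editor's illustration of the removed Matrix utility; the coefficients are arbitrary:

#include <array>

#include "libcamera/internal/matrix.h"

using namespace libcamera;

void matrixExample()
{
	Matrix<double, 3, 3> ccm(std::array<double, 9>{
		1.5, -0.3, -0.2,
		-0.2, 1.4, -0.2,
		-0.1, -0.4, 1.5,
	});

	Matrix<double, 3, 3> halved = 0.5 * ccm;
	Matrix<double, 3, 3> same = ccm * Matrix<double, 3, 3>::identity();

	bool ok;
	Matrix<double, 3, 3> inv = ccm.inverse(&ok);

	(void)halved;
	(void)same;
	(void)inv;
}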

View file

@ -8,6 +8,7 @@
#pragma once
#include <map>
#include <sstream>
#include <string>
#include <vector>
@ -55,8 +56,6 @@ public:
Signal<> disconnected;
std::vector<MediaEntity *> locateEntities(unsigned int function);
protected:
std::string logPrefix() const override;

View file

@ -48,8 +48,6 @@ public:
unsigned int flags() const { return flags_; }
int setEnabled(bool enable);
std::string toString() const;
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(MediaLink)
@ -63,8 +61,6 @@ private:
unsigned int flags_;
};
std::ostream &operator<<(std::ostream &out, const MediaLink &link);
class MediaPad : public MediaObject
{
public:
@ -75,8 +71,6 @@ public:
void addLink(MediaLink *link);
std::string toString() const;
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(MediaPad)
@ -91,8 +85,6 @@ private:
std::vector<MediaLink *> links_;
};
std::ostream &operator<<(std::ostream &out, const MediaPad &pad);
class MediaEntity : public MediaObject
{
public:
@ -112,7 +104,7 @@ public:
unsigned int deviceMinor() const { return minor_; }
const std::vector<MediaPad *> &pads() const { return pads_; }
const std::vector<MediaEntity *> &ancillaryEntities() const { return ancillaryEntities_; }
const std::vector<MediaEntity *> ancillaryEntities() const { return ancillaryEntities_; }
const MediaPad *getPadByIndex(unsigned int index) const;
const MediaPad *getPadById(unsigned int id) const;

View file

@ -1,59 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Ideas on Board Oy
*
* Media pipeline support
*/
#pragma once
#include <list>
#include <string>
#include <libcamera/base/log.h>
namespace libcamera {
class CameraSensor;
class MediaEntity;
class MediaLink;
class MediaPad;
struct V4L2SubdeviceFormat;
class MediaPipeline
{
public:
int init(MediaEntity *source, std::string_view sink);
int initLinks();
int configure(CameraSensor *sensor, V4L2SubdeviceFormat *);
private:
struct Entity {
/* The media entity, always valid. */
MediaEntity *entity;
/*
* Whether or not the entity is a subdev that supports the
* routing API.
*/
bool supportsRouting;
/*
* The local sink pad connected to the upstream entity, null for
* the camera sensor at the beginning of the pipeline.
*/
const MediaPad *sink;
/*
* The local source pad connected to the downstream entity, null
* for the video node at the end of the pipeline.
*/
const MediaPad *source;
/*
* The link on the source pad, to the downstream entity, null
* for the video node at the end of the pipeline.
*/
MediaLink *sourceLink;
};
std::list<Entity> entities_;
};
} /* namespace libcamera */
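
Editor's illustration of the removed MediaPipeline helper; the sink entity name is an invented placeholder:

#include "libcamera/internal/media_pipeline.h"

using namespace libcamera;

int setupPipeline(MediaPipeline &pipe, MediaEntity *sensorEntity,
		  CameraSensor *sensor, V4L2SubdeviceFormat *format)
{
	int ret = pipe.init(sensorEntity, "video-capture");
	if (ret)
		return ret;

	ret = pipe.initLinks();
	if (ret)
		return ret;

	return pipe.configure(sensor, format);
}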

View file

@ -2,6 +2,13 @@
subdir('tracepoints')
libcamera_tracepoint_header = custom_target(
'tp_header',
input : ['tracepoints.h.in', tracepoint_files],
output : 'tracepoints.h',
command : [gen_tracepoints_header, include_build_dir, '@OUTPUT@', '@INPUT@'],
)
libcamera_internal_headers = files([
'bayer_format.h',
'byte_stream_buffer.h',
@ -11,29 +18,23 @@ libcamera_internal_headers = files([
'camera_manager.h',
'camera_sensor.h',
'camera_sensor_properties.h',
'clock_recovery.h',
'control_serializer.h',
'control_validator.h',
'converter.h',
'debug_controls.h',
'delayed_controls.h',
'device_enumerator.h',
'device_enumerator_sysfs.h',
'device_enumerator_udev.h',
'dma_buf_allocator.h',
'dma_heaps.h',
'formats.h',
'framebuffer.h',
'ipa_data_serializer.h',
'ipa_manager.h',
'ipa_module.h',
'ipa_proxy.h',
'ipc_pipe.h',
'ipc_unixsocket.h',
'mapped_framebuffer.h',
'matrix.h',
'media_device.h',
'media_object.h',
'media_pipeline.h',
'pipeline_handler.h',
'process.h',
'pub_key.h',
@ -45,18 +46,8 @@ libcamera_internal_headers = files([
'v4l2_pixelformat.h',
'v4l2_subdevice.h',
'v4l2_videodevice.h',
'vector.h',
'yaml_parser.h',
])
tracepoints_h = custom_target(
'tp_header',
input : ['tracepoints.h.in', tracepoint_files],
output : 'tracepoints.h',
command : [gen_tracepoints, include_build_dir, '@OUTPUT@', '@INPUT@'],
)
libcamera_internal_headers += tracepoints_h
subdir('converter')
subdir('software_isp')

View file

@ -9,15 +9,19 @@
#include <memory>
#include <queue>
#include <set>
#include <string>
#include <sys/types.h>
#include <vector>
#include <libcamera/base/mutex.h>
#include <libcamera/base/object.h>
#include <libcamera/controls.h>
#include <libcamera/stream.h>
#include "libcamera/internal/ipa_proxy.h"
namespace libcamera {
class Camera;
@ -41,7 +45,7 @@ public:
MediaDevice *acquireMediaDevice(DeviceEnumerator *enumerator,
const DeviceMatch &dm);
bool acquire(Camera *camera);
bool acquire();
void release(Camera *camera);
virtual std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
@ -60,16 +64,12 @@ public:
bool completeBuffer(Request *request, FrameBuffer *buffer);
void completeRequest(Request *request);
void cancelRequest(Request *request);
std::string configurationFile(const std::string &subdir,
const std::string &name,
bool silent = false) const;
const std::string &name) const;
const char *name() const { return name_; }
CameraManager *cameraManager() const { return manager_; }
protected:
void registerCamera(std::shared_ptr<Camera> camera);
void hotplugMediaDevice(MediaDevice *media);
@ -77,7 +77,6 @@ protected:
virtual int queueRequestDevice(Camera *camera, Request *request) = 0;
virtual void stopDevice(Camera *camera) = 0;
virtual bool acquireDevice(Camera *camera);
virtual void releaseDevice(Camera *camera);
CameraManager *manager_;
@ -97,7 +96,9 @@ private:
std::queue<Request *> waitingRequests_;
const char *name_;
unsigned int useCount_;
Mutex lock_;
unsigned int useCount_ LIBCAMERA_TSA_GUARDED_BY(lock_);
friend class PipelineHandlerFactoryBase;
};

View file

@ -11,7 +11,6 @@
#include <string>
#include <vector>
#include <libcamera/base/class.h>
#include <libcamera/base/signal.h>
#include <libcamera/base/unique_fd.h>
@ -43,8 +42,6 @@ public:
Signal<enum ExitStatus, int> finished;
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(Process)
void closeAllFdsExcept(const std::vector<int> &fds);
int isolate();
void died(int wstatus);

View file

@ -10,8 +10,6 @@
#include <chrono>
#include <map>
#include <memory>
#include <stdint.h>
#include <unordered_set>
#include <libcamera/base/event_notifier.h>
#include <libcamera/base/timer.h>

View file

@ -8,6 +8,7 @@
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <sys/mman.h>

View file

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2023-2025 Red Hat Inc.
* Copyright (C) 2023, Red Hat Inc.
*
* Authors:
* Hans de Goede <hdegoede@redhat.com>
@ -10,45 +10,20 @@
#pragma once
#include <array>
#include <stdint.h>
namespace libcamera {
struct DebayerParams {
static constexpr unsigned int kRGBLookupSize = 256;
static constexpr unsigned int kGain10 = 256;
struct CcmColumn {
int16_t r;
int16_t g;
int16_t b;
};
unsigned int gainR;
unsigned int gainG;
unsigned int gainB;
using LookupTable = std::array<uint8_t, kRGBLookupSize>;
using CcmLookupTable = std::array<CcmColumn, kRGBLookupSize>;
/*
* Color lookup tables when CCM is not used.
*
* Each color of a debayered pixel is amended by the corresponding
* value in the given table.
float gamma;
/**
* \brief Level of the black point, 0..255, 0 is no correction.
*/
LookupTable red;
LookupTable green;
LookupTable blue;
/*
* Color and gamma lookup tables when CCM is used.
*
* Each of the CcmLookupTables corresponds to a CCM column; together they
* make a complete 3x3 CCM lookup table. The CCM is applied on debayered
* pixels and then the gamma lookup table is used to set the resulting
* values of all the three colors.
*/
CcmLookupTable redCcm;
CcmLookupTable greenCcm;
CcmLookupTable blueCcm;
LookupTable gammaLut;
unsigned int blackLevel;
};
} /* namespace libcamera */
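
Editor's illustration: filling the master-side lookup tables with identity values:

#include "libcamera/internal/software_isp/debayer_params.h"

using namespace libcamera;

/* Identity colour and gamma lookup tables (master-side layout). */
void identityTables(DebayerParams &params)
{
	for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) {
		params.red[i] = i;
		params.green[i] = i;
		params.blue[i] = i;
		params.gammaLut[i] = i;
	}
}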

View file

@ -7,19 +7,16 @@
#pragma once
#include <deque>
#include <functional>
#include <initializer_list>
#include <map>
#include <memory>
#include <stdint.h>
#include <string>
#include <tuple>
#include <vector>
#include <libcamera/base/class.h>
#include <libcamera/base/log.h>
#include <libcamera/base/object.h>
#include <libcamera/base/signal.h>
#include <libcamera/base/thread.h>
@ -30,7 +27,7 @@
#include <libcamera/ipa/soft_ipa_proxy.h>
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/dma_buf_allocator.h"
#include "libcamera/internal/dma_heaps.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/shared_mem_object.h"
#include "libcamera/internal/software_isp/debayer_params.h"
@ -40,16 +37,14 @@ namespace libcamera {
class DebayerCpu;
class FrameBuffer;
class PixelFormat;
class Stream;
struct StreamConfiguration;
LOG_DECLARE_CATEGORY(SoftwareIsp)
class SoftwareIsp : public Object
class SoftwareIsp
{
public:
SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor,
ControlInfoMap *ipaControls);
SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor);
~SoftwareIsp();
int loadConfiguration([[maybe_unused]] const std::string &filename) { return 0; }
@ -65,33 +60,30 @@ public:
int configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
const ipa::soft::IPAConfigInfo &configInfo);
const ControlInfoMap &sensorControls);
int exportBuffers(const Stream *stream, unsigned int count,
int exportBuffers(unsigned int output, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers);
void processStats(const uint32_t frame, const uint32_t bufferId,
const ControlList &sensorControls);
void processStats(const ControlList &sensorControls);
int start();
void stop();
void queueRequest(const uint32_t frame, const ControlList &controls);
int queueBuffers(uint32_t frame, FrameBuffer *input,
const std::map<const Stream *, FrameBuffer *> &outputs);
int queueBuffers(FrameBuffer *input,
const std::map<unsigned int, FrameBuffer *> &outputs);
void process(uint32_t frame, FrameBuffer *input, FrameBuffer *output);
void process(FrameBuffer *input, FrameBuffer *output);
Signal<FrameBuffer *> inputBufferReady;
Signal<FrameBuffer *> outputBufferReady;
Signal<uint32_t, uint32_t> ispStatsReady;
Signal<uint32_t, const ControlList &> metadataReady;
Signal<> ispStatsReady;
Signal<const ControlList &> setSensorControls;
private:
void saveIspParams();
void setSensorCtrls(const ControlList &sensorControls);
void statsReady(uint32_t frame, uint32_t bufferId);
void statsReady();
void inputReady(FrameBuffer *input);
void outputReady(FrameBuffer *output);
@ -99,12 +91,9 @@ private:
Thread ispWorkerThread_;
SharedMemObject<DebayerParams> sharedParams_;
DebayerParams debayerParams_;
DmaBufAllocator dmaHeap_;
bool ccmEnabled_;
DmaHeap dmaHeap_;
std::unique_ptr<ipa::soft::IPAProxySoft> ipa_;
std::deque<FrameBuffer *> queuedInputBuffers_;
std::deque<FrameBuffer *> queuedOutputBuffers_;
};
} /* namespace libcamera */

View file

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Google Inc.
* Copyright (C) {{year}}, Google Inc.
*
* Tracepoints with lttng
*

View file

@ -5,8 +5,6 @@
* request.tp - Tracepoints for the request object
*/
#include <stdint.h>
#include <libcamera/framebuffer.h>
#include "libcamera/internal/request.h"

View file

@ -10,7 +10,6 @@
#include <map>
#include <memory>
#include <optional>
#include <stdint.h>
#include <vector>
#include <linux/videodev2.h>
@ -45,7 +44,6 @@ public:
const std::string &deviceNode() const { return deviceNode_; }
std::string devicePath() const;
bool supportsFrameStartEvent();
int setFrameStartEnabled(bool enable);
Signal<uint32_t> frameStart;

View file

@ -49,8 +49,6 @@ public:
static const std::vector<V4L2PixelFormat> &
fromPixelFormat(const PixelFormat &pixelFormat);
bool isGenericLineBasedMetadata() const;
private:
uint32_t fourcc_;
};

View file

@ -10,7 +10,6 @@
#include <memory>
#include <optional>
#include <ostream>
#include <stdint.h>
#include <string>
#include <vector>
@ -177,9 +176,6 @@ private:
std::vector<SizeRange> enumPadSizes(const Stream &stream,
unsigned int code);
int getRoutingLegacy(Routing *routing, Whence whence);
int setRoutingLegacy(Routing *routing, Whence whence);
const MediaEntity *entity_;
std::string model_;

View file

@ -8,6 +8,7 @@
#pragma once
#include <array>
#include <atomic>
#include <memory>
#include <optional>
#include <ostream>
@ -157,7 +158,7 @@ private:
std::vector<Plane> planes_;
};
uint64_t lastUsedCounter_;
std::atomic<uint64_t> lastUsedCounter_;
std::vector<Entry> cache_;
/* \todo Expose the miss counter through an instrumentation API. */
unsigned int missCounter_;
@ -207,7 +208,6 @@ public:
int setFormat(V4L2DeviceFormat *format);
Formats formats(uint32_t code = 0);
int getSelection(unsigned int target, Rectangle *rect);
int setSelection(unsigned int target, Rectangle *rect);
int allocateBuffers(unsigned int count,

View file

@ -1,371 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
*
* Vector and related operations
*/
#pragma once
#include <algorithm>
#include <array>
#include <cmath>
#include <functional>
#include <numeric>
#include <optional>
#include <ostream>
#include <type_traits>
#include <libcamera/base/log.h>
#include <libcamera/base/span.h>
#include "libcamera/internal/matrix.h"
#include "libcamera/internal/yaml_parser.h"
namespace libcamera {
LOG_DECLARE_CATEGORY(Vector)
#ifndef __DOXYGEN__
template<typename T, unsigned int Rows,
std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
#else
template<typename T, unsigned int Rows>
#endif /* __DOXYGEN__ */
class Vector
{
public:
constexpr Vector() = default;
constexpr explicit Vector(T scalar)
{
data_.fill(scalar);
}
constexpr Vector(const std::array<T, Rows> &data)
{
std::copy(data.begin(), data.end(), data_.begin());
}
constexpr Vector(const Span<const T, Rows> data)
{
std::copy(data.begin(), data.end(), data_.begin());
}
const T &operator[](size_t i) const
{
ASSERT(i < data_.size());
return data_[i];
}
T &operator[](size_t i)
{
ASSERT(i < data_.size());
return data_[i];
}
constexpr Vector<T, Rows> operator-() const
{
Vector<T, Rows> ret;
for (unsigned int i = 0; i < Rows; i++)
ret[i] = -data_[i];
return ret;
}
constexpr Vector operator+(const Vector &other) const
{
return apply(*this, other, std::plus<>{});
}
constexpr Vector operator+(T scalar) const
{
return apply(*this, scalar, std::plus<>{});
}
constexpr Vector operator-(const Vector &other) const
{
return apply(*this, other, std::minus<>{});
}
constexpr Vector operator-(T scalar) const
{
return apply(*this, scalar, std::minus<>{});
}
constexpr Vector operator*(const Vector &other) const
{
return apply(*this, other, std::multiplies<>{});
}
constexpr Vector operator*(T scalar) const
{
return apply(*this, scalar, std::multiplies<>{});
}
constexpr Vector operator/(const Vector &other) const
{
return apply(*this, other, std::divides<>{});
}
constexpr Vector operator/(T scalar) const
{
return apply(*this, scalar, std::divides<>{});
}
Vector &operator+=(const Vector &other)
{
return apply(other, [](T a, T b) { return a + b; });
}
Vector &operator+=(T scalar)
{
return apply(scalar, [](T a, T b) { return a + b; });
}
Vector &operator-=(const Vector &other)
{
return apply(other, [](T a, T b) { return a - b; });
}
Vector &operator-=(T scalar)
{
return apply(scalar, [](T a, T b) { return a - b; });
}
Vector &operator*=(const Vector &other)
{
return apply(other, [](T a, T b) { return a * b; });
}
Vector &operator*=(T scalar)
{
return apply(scalar, [](T a, T b) { return a * b; });
}
Vector &operator/=(const Vector &other)
{
return apply(other, [](T a, T b) { return a / b; });
}
Vector &operator/=(T scalar)
{
return apply(scalar, [](T a, T b) { return a / b; });
}
constexpr Vector min(const Vector &other) const
{
return apply(*this, other, [](T a, T b) { return std::min(a, b); });
}
constexpr Vector min(T scalar) const
{
return apply(*this, scalar, [](T a, T b) { return std::min(a, b); });
}
constexpr Vector max(const Vector &other) const
{
return apply(*this, other, [](T a, T b) { return std::max(a, b); });
}
constexpr Vector max(T scalar) const
{
return apply(*this, scalar, [](T a, T b) -> T { return std::max(a, b); });
}
constexpr T dot(const Vector<T, Rows> &other) const
{
T ret = 0;
for (unsigned int i = 0; i < Rows; i++)
ret += data_[i] * other[i];
return ret;
}
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
#endif /* __DOXYGEN__ */
constexpr const T &x() const { return data_[0]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
#endif /* __DOXYGEN__ */
constexpr const T &y() const { return data_[1]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
#endif /* __DOXYGEN__ */
constexpr const T &z() const { return data_[2]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
#endif /* __DOXYGEN__ */
constexpr T &x() { return data_[0]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
#endif /* __DOXYGEN__ */
constexpr T &y() { return data_[1]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
#endif /* __DOXYGEN__ */
constexpr T &z() { return data_[2]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
#endif /* __DOXYGEN__ */
constexpr const T &r() const { return data_[0]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
#endif /* __DOXYGEN__ */
constexpr const T &g() const { return data_[1]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
#endif /* __DOXYGEN__ */
constexpr const T &b() const { return data_[2]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
#endif /* __DOXYGEN__ */
constexpr T &r() { return data_[0]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
#endif /* __DOXYGEN__ */
constexpr T &g() { return data_[1]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
#endif /* __DOXYGEN__ */
constexpr T &b() { return data_[2]; }
constexpr double length2() const
{
double ret = 0;
for (unsigned int i = 0; i < Rows; i++)
ret += data_[i] * data_[i];
return ret;
}
constexpr double length() const
{
return std::sqrt(length2());
}
template<typename R = T>
constexpr R sum() const
{
return std::accumulate(data_.begin(), data_.end(), R{});
}
private:
template<class BinaryOp>
static constexpr Vector apply(const Vector &lhs, const Vector &rhs, BinaryOp op)
{
Vector result;
std::transform(lhs.data_.begin(), lhs.data_.end(),
rhs.data_.begin(), result.data_.begin(),
op);
return result;
}
template<class BinaryOp>
static constexpr Vector apply(const Vector &lhs, T rhs, BinaryOp op)
{
Vector result;
std::transform(lhs.data_.begin(), lhs.data_.end(),
result.data_.begin(),
[&op, rhs](T v) { return op(v, rhs); });
return result;
}
template<class BinaryOp>
Vector &apply(const Vector &other, BinaryOp op)
{
auto itOther = other.data_.begin();
std::for_each(data_.begin(), data_.end(),
[&op, &itOther](T &v) { v = op(v, *itOther++); });
return *this;
}
template<class BinaryOp>
Vector &apply(T scalar, BinaryOp op)
{
std::for_each(data_.begin(), data_.end(),
[&op, scalar](T &v) { v = op(v, scalar); });
return *this;
}
std::array<T, Rows> data_;
};
template<typename T>
using RGB = Vector<T, 3>;
template<typename T, typename U, unsigned int Rows, unsigned int Cols>
Vector<std::common_type_t<T, U>, Rows> operator*(const Matrix<T, Rows, Cols> &m, const Vector<U, Cols> &v)
{
Vector<std::common_type_t<T, U>, Rows> result;
for (unsigned int i = 0; i < Rows; i++) {
std::common_type_t<T, U> sum = 0;
for (unsigned int j = 0; j < Cols; j++)
sum += m[i][j] * v[j];
result[i] = sum;
}
return result;
}
template<typename T, unsigned int Rows>
bool operator==(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
{
for (unsigned int i = 0; i < Rows; i++) {
if (lhs[i] != rhs[i])
return false;
}
return true;
}
template<typename T, unsigned int Rows>
bool operator!=(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
{
return !(lhs == rhs);
}
#ifndef __DOXYGEN__
bool vectorValidateYaml(const YamlObject &obj, unsigned int size);
#endif /* __DOXYGEN__ */
#ifndef __DOXYGEN__
template<typename T, unsigned int Rows>
std::ostream &operator<<(std::ostream &out, const Vector<T, Rows> &v)
{
out << "Vector { ";
for (unsigned int i = 0; i < Rows; i++) {
out << v[i];
out << ((i + 1 < Rows) ? ", " : " ");
}
out << " }";
return out;
}
template<typename T, unsigned int Rows>
struct YamlObject::Getter<Vector<T, Rows>> {
std::optional<Vector<T, Rows>> get(const YamlObject &obj) const
{
if (!vectorValidateYaml(obj, Rows))
return std::nullopt;
Vector<T, Rows> vector;
unsigned int i = 0;
for (const YamlObject &entry : obj.asList()) {
const auto value = entry.get<T>();
if (!value)
return std::nullopt;
vector[i++] = *value;
}
return vector;
}
};
#endif /* __DOXYGEN__ */
} /* namespace libcamera */
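
Editor's illustration of the removed Vector utility and its RGB alias; values are arbitrary:

#include "libcamera/internal/vector.h"

using namespace libcamera;

void vectorExample()
{
	RGB<double> pixel{ { 0.4, 0.5, 0.3 } };
	RGB<double> weights{ { 0.2126, 0.7152, 0.0722 } };

	double luma = pixel.dot(weights);	/* weighted sum */
	RGB<double> gained = pixel * 1.5;	/* per-channel gain */
	double g = gained.g();			/* named accessor */

	(void)luma;
	(void)g;
}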

View file

@ -10,9 +10,7 @@
#include <iterator>
#include <map>
#include <optional>
#include <stdint.h>
#include <string>
#include <string_view>
#include <vector>
#include <libcamera/base/class.h>
@ -160,34 +158,37 @@ public:
{
return type_ == Type::Dictionary;
}
bool isEmpty() const
{
return type_ == Type::Empty;
}
explicit operator bool() const
{
return type_ != Type::Empty;
}
std::size_t size() const;
#ifndef __DOXYGEN__
template<typename T,
std::enable_if_t<
std::is_same_v<bool, T> ||
std::is_same_v<double, T> ||
std::is_same_v<int8_t, T> ||
std::is_same_v<uint8_t, T> ||
std::is_same_v<int16_t, T> ||
std::is_same_v<uint16_t, T> ||
std::is_same_v<int32_t, T> ||
std::is_same_v<uint32_t, T> ||
std::is_same_v<std::string, T> ||
std::is_same_v<Size, T>> * = nullptr>
#else
template<typename T>
std::optional<T> get() const
{
return Getter<T>{}.get(*this);
}
#endif
std::optional<T> get() const;
template<typename T, typename U>
T get(U &&defaultValue) const
template<typename T>
T get(const T &defaultValue) const
{
return get<T>().value_or(std::forward<U>(defaultValue));
return get<T>().value_or(defaultValue);
}
#ifndef __DOXYGEN__
template<typename T,
std::enable_if_t<
std::is_same_v<bool, T> ||
std::is_same_v<float, T> ||
std::is_same_v<double, T> ||
std::is_same_v<int8_t, T> ||
std::is_same_v<uint8_t, T> ||
@ -207,33 +208,25 @@ public:
const YamlObject &operator[](std::size_t index) const;
bool contains(std::string_view key) const;
const YamlObject &operator[](std::string_view key) const;
bool contains(const std::string &key) const;
const YamlObject &operator[](const std::string &key) const;
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(YamlObject)
template<typename T>
friend struct Getter;
friend class YamlParserContext;
enum class Type {
Dictionary,
List,
Value,
Empty,
};
template<typename T, typename Enable = void>
struct Getter {
std::optional<T> get(const YamlObject &obj) const;
};
Type type_;
std::string value_;
Container list_;
std::map<std::string, YamlObject *, std::less<>> dictionary_;
std::map<std::string, YamlObject *> dictionary_;
};
class YamlParser final
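
Editor's illustration of the templated YamlObject::get<>() accessors shown above, assuming a parsed tree `root`; "gamma" is an invented key:

#include <optional>

#include "libcamera/internal/yaml_parser.h"

using namespace libcamera;

void yamlExample(const YamlObject &root)
{
	/* Typed lookup; std::nullopt on a missing or mistyped value. */
	std::optional<double> gamma = root["gamma"].get<double>();

	/* Or fall back to a default. */
	double gammaOrDefault = root["gamma"].get<double>(2.2);

	(void)gamma;
	(void)gammaOrDefault;
}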

View file

@ -46,8 +46,7 @@ struct ipa_control_info_entry {
uint32_t id;
uint32_t type;
uint32_t offset;
uint8_t direction;
uint8_t padding[3];
uint32_t padding[1];
};
#ifdef __cplusplus

View file

@ -7,6 +7,19 @@
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <map>
#include <vector>
#include <libcamera/base/flags.h>
#include <libcamera/base/signal.h>
#include <libcamera/controls.h>
#include <libcamera/framebuffer.h>
#include <libcamera/geometry.h>
namespace libcamera {
/*
@ -20,8 +33,8 @@ public:
virtual ~IPAInterface() = default;
};
} /* namespace libcamera */
extern "C" {
libcamera::IPAInterface *ipaCreate();
}
} /* namespace libcamera */

View file

@ -31,14 +31,14 @@ interface IPAIPU3Interface {
unmapBuffers(array<uint32> ids);
[async] queueRequest(uint32 frame, libcamera.ControlList controls);
[async] computeParams(uint32 frame, uint32 bufferId);
[async] processStats(uint32 frame, int64 frameTimestamp,
[async] fillParamsBuffer(uint32 frame, uint32 bufferId);
[async] processStatsBuffer(uint32 frame, int64 frameTimestamp,
uint32 bufferId, libcamera.ControlList sensorControls);
};
interface IPAIPU3EventInterface {
setSensorControls(uint32 frame, libcamera.ControlList sensorControls,
libcamera.ControlList lensControls);
paramsComputed(uint32 frame);
paramsBufferReady(uint32 frame);
metadataReady(uint32 frame, libcamera.ControlList metadata);
};

Some files were not shown because too many files have changed in this diff.