Compare commits

master..v0.1.0

No commits in common. "master" and "v0.1.0" have entirely different histories.

1047 changed files with 14238 additions and 100576 deletions

View file

@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# clang-format configuration file. Intended for clang-format >= 12.
# clang-format configuration file. Intended for clang-format >= 7.
#
# For more information, see:
#
@@ -66,6 +66,7 @@ ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- 'udev_list_entry_foreach'
SortIncludes: true
IncludeBlocks: Regroup
IncludeCategories:
# Headers matching the name of the component are matched automatically.
@@ -75,7 +76,6 @@ IncludeCategories:
Priority: 9
# Qt includes (match before C++ standard library)
- Regex: '<Q([A-Za-z0-9\-_])+>'
CaseSensitive: true
Priority: 9
# Headers in <> with an extension. (+system libraries)
- Regex: '<([A-Za-z0-9\-_])+\.h>'

View file

@@ -1,29 +0,0 @@
# SPDX-License-Identifier: CC0-1.0
root = true
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
[*.{cpp,h}]
indent_size = 8
indent_style = tab
[*.json]
indent_size = 4
indent_style = space
[*.py]
indent_size = 4
indent_style = space
[*.yaml]
indent_size = 2
indent_style = space
[{meson.build,meson_options.txt}]
indent_size = 4
indent_style = space

.gitignore
View file

@@ -6,4 +6,3 @@
*.patch
*.pyc
__pycache__/
venv/

View file

@@ -3,12 +3,6 @@ Upstream-Name: libcamera
Upstream-Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Source: https://git.libcamera.org/libcamera/libcamera.git/
Files: Documentation/binning.svg
Documentation/camera-sensor-model.rst
Documentation/sensor_model.svg
Copyright: Copyright 2023 Ideas On Board Oy
License: CC-BY-SA-4.0
Files: Documentation/theme/static/search.png
Copyright: 2022 Fonticons, Inc.
License: CC-BY-4.0

View file

@@ -1,33 +0,0 @@
# SPDX-License-Identifier: CC-BY-SA-4.0
@INCLUDE_PATH = @TOP_BUILDDIR@/Documentation
@INCLUDE = Doxyfile-common
HIDE_UNDOC_CLASSES = NO
HIDE_UNDOC_MEMBERS = NO
HTML_OUTPUT = internal-api-html
INTERNAL_DOCS = YES
ENABLED_SECTIONS = internal
INPUT = "@TOP_SRCDIR@/Documentation" \
"@TOP_SRCDIR@/include/libcamera" \
"@TOP_SRCDIR@/src/ipa/ipu3" \
"@TOP_SRCDIR@/src/ipa/libipa" \
"@TOP_SRCDIR@/src/libcamera" \
"@TOP_BUILDDIR@/include/libcamera" \
"@TOP_BUILDDIR@/src/libcamera"
EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
@TOP_SRCDIR@/include/libcamera/internal/device_enumerator_sysfs.h \
@TOP_SRCDIR@/include/libcamera/internal/device_enumerator_udev.h \
@TOP_SRCDIR@/include/libcamera/internal/ipc_pipe_unixsocket.h \
@TOP_SRCDIR@/src/libcamera/device_enumerator_sysfs.cpp \
@TOP_SRCDIR@/src/libcamera/device_enumerator_udev.cpp \
@TOP_SRCDIR@/src/libcamera/ipc_pipe_unixsocket.cpp \
@TOP_SRCDIR@/src/libcamera/pipeline/ \
@TOP_SRCDIR@/src/libcamera/sensor/camera_sensor_legacy.cpp \
@TOP_SRCDIR@/src/libcamera/sensor/camera_sensor_raw.cpp \
@TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
@TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
@TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
@TOP_BUILDDIR@/src/libcamera/proxy/

View file

@@ -1,20 +0,0 @@
# SPDX-License-Identifier: CC-BY-SA-4.0
@INCLUDE_PATH = @TOP_BUILDDIR@/Documentation
@INCLUDE = Doxyfile-common
HIDE_UNDOC_CLASSES = YES
HIDE_UNDOC_MEMBERS = YES
HTML_OUTPUT = api-html
INTERNAL_DOCS = NO
INPUT = "@TOP_SRCDIR@/Documentation" \
${inputs}
EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/class.h \
@TOP_SRCDIR@/include/libcamera/base/object.h \
@TOP_SRCDIR@/include/libcamera/base/span.h \
@TOP_SRCDIR@/src/libcamera/base/class.cpp \
@TOP_SRCDIR@/src/libcamera/base/object.cpp
PREDEFINED += __DOXYGEN_PUBLIC__

View file

@@ -20,19 +20,35 @@ TOC_INCLUDE_HEADINGS = 0
CASE_SENSE_NAMES = YES
QUIET = YES
WARN_AS_ERROR = @WARN_AS_ERROR@
INPUT = "@TOP_SRCDIR@/include/libcamera" \
"@TOP_SRCDIR@/src/ipa/ipu3" \
"@TOP_SRCDIR@/src/ipa/libipa" \
"@TOP_SRCDIR@/src/libcamera" \
"@TOP_BUILDDIR@/include/libcamera" \
"@TOP_BUILDDIR@/src/libcamera"
FILE_PATTERNS = *.c \
*.cpp \
*.dox \
*.h
RECURSIVE = YES
EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
@TOP_SRCDIR@/include/libcamera/internal/device_enumerator_sysfs.h \
@TOP_SRCDIR@/include/libcamera/internal/device_enumerator_udev.h \
@TOP_SRCDIR@/include/libcamera/internal/ipc_pipe_unixsocket.h \
@TOP_SRCDIR@/src/libcamera/device_enumerator_sysfs.cpp \
@TOP_SRCDIR@/src/libcamera/device_enumerator_udev.cpp \
@TOP_SRCDIR@/src/libcamera/ipc_pipe_unixsocket.cpp \
@TOP_SRCDIR@/src/libcamera/pipeline/ \
@TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
@TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
@TOP_BUILDDIR@/src/libcamera/proxy/
EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \
@TOP_BUILDDIR@/include/libcamera/ipa/*_proxy.h \
@TOP_BUILDDIR@/include/libcamera/ipa/ipu3_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/mali-c55_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/raspberrypi_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/rkisp1_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/vimc_*.h
@@ -52,17 +68,16 @@ EXCLUDE_SYMBOLS = libcamera::BoundMethodArgs \
EXCLUDE_SYMLINKS = YES
HTML_OUTPUT = api-html
GENERATE_LATEX = NO
MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = YES
INCLUDE_PATH = "@TOP_BUILDDIR@/include" \
"@TOP_SRCDIR@/include"
INCLUDE_PATH = "@TOP_SRCDIR@/include/libcamera"
INCLUDE_FILE_PATTERNS = *.h
IMAGE_PATH = "@TOP_SRCDIR@/Documentation/images"
PREDEFINED = __DOXYGEN__ \
__cplusplus \
__attribute__(x)= \

View file

@@ -2,7 +2,7 @@
.. _api:
API Reference
=============
API
===
::

   Placeholder for Doxygen documentation

File diff suppressed because it is too large

(image diff not shown; file size 194 KiB)

View file

@@ -1,175 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _camera-sensor-model:
.. todo: Move to Doxygen-generated documentation
The libcamera camera sensor model
=================================
libcamera defines an abstract camera sensor model in order to provide
a description of each of the processing steps that result in image data being
sent on the media bus and that form the image stream delivered to applications.
Applications should use the abstract camera sensor model defined here to
precisely control the operations of the camera sensor.
The libcamera camera sensor model targets image sensors producing frames in
RAW format, delivered through a MIPI CSI-2 compliant bus implementation.
The abstract sensor model maps libcamera components to the characteristics and
operations of an image sensor, and serves as a reference to model the libcamera
CameraSensor class and SensorConfiguration classes and operations.
In order to control the configuration of the camera sensor through the
SensorConfiguration class, applications should understand this model and map it
to the combination of image sensor and kernel driver in use.
The camera sensor model defined here is based on the *MIPI CCS specification*,
particularly on *Section 8.2 - Image readout* of *Chapter 8 - Video Timings*.
Glossary
--------
.. glossary::
Pixel array
The full grid of pixels, active and inactive ones
Pixel array active area
The portion(s) of the pixel array that contains valid and readable pixels;
corresponds to the libcamera properties::PixelArrayActiveAreas
Analog crop rectangle
The portion of the *pixel array active area* which is read out and passed
to further processing stages
Subsampling
Pixel processing techniques that reduce the image size by binning or by
skipping adjacent pixels
Digital crop
Crop of the sub-sampled image data before scaling
Frame output
The frame (image) as output on the media bus by the camera sensor
Camera sensor model
-------------------
The abstract sensor model is described in the following diagram.
.. figure:: sensor_model.svg
1. The sensor reads pixels from the *pixel array*. The pixels being read out are
selected by the *analog crop rectangle*.
2. The pixels can be subsampled to reduce the image size without affecting the
field of view. Two subsampling techniques can be used:
- Binning: combines adjacent pixels of the same colour by averaging or
summing their values, in the analog domain and/or the digital domain.
.. figure:: binning.svg
- Skipping: skips the read out of a number of adjacent pixels.
.. figure:: skipping.svg
3. The output of the optional sub-sampling stage is then cropped after the
conversion of the analogue pixel values in the digital domain.
4. The resulting output frame is sent on the media bus by the sensor.
Camera Sensor configuration parameters
--------------------------------------
The libcamera camera sensor model defines parameters that allow users to
control:
1. The image format bit depth
2. The size and position of the *Analog crop rectangle*
3. The subsampling factors used to downscale the pixel array readout data to a
smaller frame size without reducing the image *field of view*. Two
configuration parameters are made available to control the downscaling
factor:
- binning
A vertical and horizontal binning factor can be specified, the image
will be downscaled in its vertical and horizontal sizes by the specified
factor.
.. code-block:: c
:caption: Definition: The horizontal and vertical binning factors
horizontal_binning = xBin;
vertical_binning = yBin;
- skipping
Skipping reduces the image resolution by skipping the read-out of a number
of adjacent pixels. The skipping factor is specified by the 'increment'
number (number of pixels to 'skip') in the vertical and horizontal
directions and for even and odd rows and columns.
.. code-block:: c
:caption: Definition: The horizontal and vertical skipping factors
horizontal_skipping = (xOddInc + xEvenInc) / 2;
vertical_skipping = (yOddInc + yEvenInc) / 2;
Different sensors perform the binning and skipping stages in different
orders. For the sake of computing the final output image size the order of
execution is not relevant. The overall down-scaling factor is obtained by
combining the binning and skipping factors.
.. code-block:: c
:caption: Definition: The total scaling factor (binning + sub-sampling)
total_horizontal_downscale = horizontal_binning * horizontal_skipping;
total_vertical_downscale = vertical_binning * vertical_skipping;
4. The output size is used to specify any additional cropping on the sub-sampled
frame.
5. The total line length and frame height (*visible* pixels + *blankings*) as
sent on the MIPI CSI-2 bus.
6. The pixel transmission rate on the MIPI CSI-2 bus.
The above parameters are combined to obtain the following high-level
configurations:
- **frame output size**
Obtained by applying a crop to the physical pixel array size in the analog
domain, followed by optional binning and sub-sampling (in any order),
followed by an optional crop step in the output digital domain.
- **frame rate**
The combination of the *total frame size*, the image format *bit depth* and
the *pixel rate* of the data sent on the MIPI CSI-2 bus makes it possible to
compute the image stream frame rate. The equation is the well-known:
.. code-block:: c
frame_duration = total_frame_size / pixel_rate;
frame_rate = 1 / frame_duration;
where the *pixel_rate* parameter is the result of the sensor's configuration
of the MIPI CSI-2 bus *(the following formula applies to MIPI CSI-2 when
used on MIPI D-PHY physical protocol layer only)*
.. code-block:: c
pixel_rate = csi_2_link_freq * 2 * nr_of_lanes / bits_per_sample;
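As a worked example of the formulas above, the following standalone sketch
combines an invented analog crop, binning and skipping configuration with an
invented MIPI CSI-2 link configuration. Every numeric value is illustrative
and does not come from this document.

.. code-block:: cpp

   #include <iostream>

   int main()
   {
           /* Invented sensor configuration. */
           const double cropWidth = 4096, cropHeight = 3072; /* analog crop */
           const double xBin = 2, yBin = 2;                  /* 2x2 binning */
           const double xOddInc = 1, xEvenInc = 1;           /* no skipping */
           const double yOddInc = 1, yEvenInc = 1;

           const double hSkip = (xOddInc + xEvenInc) / 2;
           const double vSkip = (yOddInc + yEvenInc) / 2;
           const double hScale = xBin * hSkip; /* total horizontal downscale */
           const double vScale = yBin * vSkip; /* total vertical downscale */

           const double outWidth = cropWidth / hScale;   /* 2048 */
           const double outHeight = cropHeight / vScale; /* 1536 */

           /* Invented CSI-2 setup: 4 D-PHY lanes, 450 MHz link, RAW10. */
           const double pixelRate = 450e6 * 2 * 4 / 10; /* 360 Mpixels/s */

           /* The total frame size includes the blanking intervals. */
           const double totalFrameSize = (outWidth + 512) * (outHeight + 64);
           const double frameRate = pixelRate / totalFrameSize; /* ~87.9 fps */

           std::cout << outWidth << "x" << outHeight << " @ "
                     << frameRate << " fps" << std::endl;
           return 0;
   }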

View file

@@ -1,96 +0,0 @@
.. SPDX-License-Identifier: CC-BY-4.0
.. include:: documentation-contents.rst
.. _code-of-conduct:
Contributor Covenant Code of Conduct
====================================
Our Pledge
----------
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to make participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
Our Standards
-------------
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
Our Responsibilities
--------------------
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
Scope
-----
This Code of Conduct applies within all project spaces, and it also applies when
an individual is representing the project or its community in public spaces.
Examples of representing a project or community include using an official
project e-mail address, posting via an official social media account, or acting
as an appointed representative at an online or offline event. Representation of
a project may be further defined and clarified by project maintainers.
Enforcement
-----------
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at conduct@libcamera.org, or directly to
any member of the code of conduct team:
* Kieran Bingham <kieran.bingham@ideasonboard.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
All complaints will be reviewed and investigated and will result in a response
that is deemed necessary and appropriate to the circumstances. The project team
is obligated to maintain confidentiality with regard to the reporter of an
incident. Further details of specific enforcement policies may be posted
separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
Attribution
-----------
This Code of Conduct is adapted from the `Contributor Covenant`_, version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
.. _Contributor Covenant: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

View file

@@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _coding-style-guidelines:
Coding Style Guidelines
@@ -61,7 +59,7 @@ document:
underscores in between
* All formatting rules specified in the selected sections of the Linux kernel
Code Style for indentation, braces, spacing, etc
* Headers are guarded by the use of '#pragma once'
* Header guards are formatted as '__LIBCAMERA_FILE_NAME_H__'
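For illustration, the two conventions this hunk swaps between look as follows
in a header (the file name in the guard is hypothetical):

.. code-block:: cpp

   /* master convention: */
   #pragma once

   /* v0.1.0 convention: */
   #ifndef __LIBCAMERA_FILE_NAME_H__
   #define __LIBCAMERA_FILE_NAME_H__

   /* declarations */

   #endif /* __LIBCAMERA_FILE_NAME_H__ */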
Order of Includes
~~~~~~~~~~~~~~~~~

View file

@ -37,11 +37,8 @@ author = u'Kieran Bingham, Jacopo Mondi, Laurent Pinchart, Niklas Söderlund'
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.graphviz'
]
graphviz_output_format = 'svg'
# Add any paths that contain templates here, relative to this directory.
templates_path = []
@@ -64,12 +61,7 @@ language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'_build',
'Thumbs.db',
'.DS_Store',
'documentation-contents.rst',
]
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None

View file

@@ -8,10 +8,6 @@ Whether you would like to help with coding, documentation, testing, proposing
new features, or just discussing the project with the community, you can join
our official public communication channels, or simply check out the code.
The project adheres to a :ref:`code of conduct <code-of-conduct>` that
maintainers, contributors and community members are expected to follow in all
online and offline communication.
Mailing List
------------
@@ -138,5 +134,4 @@ By making a contribution to this project, I certify that:
.. toctree::
:hidden:
Code of Conduct <code-of-conduct>
Coding Style <coding-style>

View file

@@ -1,331 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
Design of Exposure and Gain controls
====================================
This document explains the design and rationale of the controls related to
exposure and gain. This includes the all-encompassing auto-exposure (AE), the
manual exposure control, and the manual gain control.
Description of the problem
--------------------------
Sub controls
^^^^^^^^^^^^
There are more than one control that make up total exposure: exposure time,
gain, and aperture (though for now we will not consider aperture). We already
had individual controls for setting the values of manual exposure and manual
gain, but for switching between auto mode and manual mode we only had a
high-level boolean AeEnable control that would set *both* exposure and gain to
auto mode or manual mode; we had no way to set one to auto and the other to
manual.
So, we need to introduce two new controls to act as "levers" to indicate
individually for exposure and gain if the value would come from AEGC or if it
would come from the manual control value.
Aperture priority
^^^^^^^^^^^^^^^^^
We eventually may need to support aperture, and so whatever our solution is for
having only some controls on auto and the others on manual needs to be
extensible.
Flickering when going from auto to manual
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When a manual exposure or gain value is requested by the application, it costs
a few frames worth of time for them to take effect. This means that during a
transition from auto to manual, there would be flickering in the control values
and the transition won't be smooth.
Take for instance the following flow, where we start on auto exposure (which
for the purposes of the example increments by 1 each frame) and we want to
switch seamlessly to manual exposure, which involves copying the exposure value
computed by the auto exposure algorithm:
::
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
| N | | N+1 | | N+2 | | N+3 | | N+4 | | N+5 | | N+6 |
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
Mode requested: Auto Auto Auto Manual Manual Manual Manual
Exp requested: N/A N/A N/A 2 2 2 2
Set in Frame: N+2 N+3 N+4 N+5 N+6 N+7 N+8
Mode used: Auto Auto Auto Auto Auto Manual Manual
Exp used: 0 1 2 3 4 2 2
As we can see, after frame N+2 completes, we copy the exposure value that was
used for frame N+2 (which was computed by AE algorithm), and queue that value
into request N+3 with manual mode on. However, as it takes two frames for the
exposure to be set, the exposure still changes since it is set by AE, and we
get a flicker in the exposure during the switch from auto to manual.
A solution is to *not submit* any exposure value when manual mode is enabled,
and wait until the manual mode has been "applied" before copying the exposure
value:
::
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
| N | | N+1 | | N+2 | | N+3 | | N+4 | | N+5 | | N+6 |
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
Mode requested: Auto Auto Auto Manual Manual Manual Manual
Exp requested: N/A N/A N/A None None None 5
Set in Frame: N+2 N+3 N+4 N+5 N+6 N+7 N+8
Mode used: Auto Auto Auto Auto Auto Manual Manual
Exp used: 0 1 2 3 4 5 5
In practice, this works. However, libcamera has a policy where once a control
is submitted, its value is saved and does not need to be resubmitted. If the
manual exposure value was set while auto mode was on, in theory the value would
be saved, so when manual mode is enabled, the exposure value that was
previously set would immediately be used. Clearly this solution isn't correct,
but it can serve as the basis for a proper solution, with some more rigorous
rules.
Existing solutions
------------------
Raspberry Pi
^^^^^^^^^^^^
The Raspberry Pi IPA gets around the lack of individual AeEnable controls for
exposure and gain by using magic values. When AeEnable is false, if one of the
manual control values was set to 0 then the value computed by AEGC would be
used for just that control. This solution isn't desirable, as it prevents
that magic value from being used as a valid value.
To get around the flickering issue, when AeEnable is false, the Raspberry Pi
AEGC simply stops updating the values to be set, without restoring the
previously set manual exposure time and gain. This works, but is not a proper
solution.
Android
^^^^^^^
The Android HAL specification requires that exposure and gain (sensitivity)
must both be manual or both be auto. It cannot be that one is manual while the
other is auto, so they simply don't support sub controls.
For the flickering issue, the Android HAL has an AeLock control. To transition
from auto to manual, the application would keep AE on auto, and turn on the
lock. Once the lock has propagated through, then the value can be copied from
the result into the request and the lock disabled and the mode set to manual.
The problem with this solution is, besides the extra complexity, that it is
ambiguous what happens if there is a state transition from manual to locked
(even though it's a state transition that doesn't make sense). If locked is
defined to "use the last automatically computed values" then it could use the
values from the last time AE was set to auto, or it would be undefined if AE
was never auto (e.g. it started out as manual), or if AE is implemented to run
in the background it could just use the current values that are computed. If
locked is defined to "use the last value that was set" there would be less
ambiguity. Still, it's better if we can make it impossible to execute this
nonsensical state transition, and if we can reduce the complexity of having
this extra control or extra setting on a lever.
Summary of goals
----------------
- We need a lock of some sort, to instruct the AEGC to not update output
results
- We need manual modes, to override the values computed by the AEGC
- We need to support seamless transitions from auto to manual, and do so
without flickering
- We need custom minimum values for the manual controls; that is, no magic
values for enabling/disabling auto
- All of these need to be done with AE sub-controls (exposure time, analogue
gain) and be extensible to aperture in the future
Our solution
------------
A diagram of our solution:
::
+----------------------------+-------------+------------------+-----------------+
| INPUT | ALGORITHM | RESULT | OUTPUT |
+----------------------------+-------------+------------------+-----------------+
ExposureTimeMode ExposureTimeMode
---------------------+----------------------------------------+----------------->
0: Auto | |
1: Manual | V
| |\
| | \
| /----------------------------------> | 1| ExposureTime
| | +-------------+ exposure time | | -------------->
\--)--> | | --------------> | 0|
ExposureTime | | | | /
------------------------+--> | | |/
| | AeState
| AEGC | ----------------------------------->
AnalogueGain | |
------------------------+--> | | |\
| | | | \
/--)--> | | --------------> | 0| AnalogueGain
| | +-------------+ analogue gain | | -------------->
| \----------------------------------> | 1|
| | /
| |/
| ^
AnalogueGainMode | | AnalogueGainMode
---------------------+----------------------------------------+----------------->
0: Auto
1: Manual
AeEnable
- True -> ExposureTimeMode:Auto + AnalogueGainMode:Auto
- False -> ExposureTimeMode:Manual + AnalogueGainMode:Manual
The diagram is divided into four sections horizontally:
- Input: The values received from the request controls
- Algorithm: The algorithm itself
- Result: The values calculated by the algorithm
- Output: The values reported in result metadata and applied to the device
The four input controls are divided between manual values (ExposureTime and
AnalogueGain), and operation modes (ExposureTimeMode and AnalogueGainMode). The
former are the manual values, the latter control how they're applied. The two
modes are independent from each other, and each can take one of two values:
- Auto (0): The AGC computes the value normally. The AGC result is applied
to the output. The manual value is ignored *and is not retained*.
- Manual (1): The AGC uses the manual value internally. The corresponding
manual control from the request is applied to the output. The AGC result
is ignored.
The AeState control reports the state of the unified AEGC block. If both
ExposureTimeMode and AnalogueGainMode are set to manual then it will report
Idle. If at least one of the two is set to auto, then AeState will report
if the AEGC has Converged or not (Searching). This control replaces the old
AeLocked control, as it was insufficient for reporting the AE state.
There is a caveat to manual mode: the manual control value is not retained if
it is set during auto mode. This means that if manual mode is entered without
also setting the manual value, then it will enter a state similar to "locked",
where the last automatically computed value while the mode was auto will be
used. Once the manual value is set, then that will be used and retained as
usual.
This simulates an auto -> locked -> manual or auto -> manual state transition,
and makes it impossible to do the nonsensical manual -> locked state
transition.
AeEnable still exists to allow applications to set the mode of all the
sub-controls at once. Besides being convenient, this will also be useful when
we eventually implement an aperture control. Applications written before
aperture support becomes available would still be able to set the aperture
mode to auto or manual, as opposed to having the aperture stuck at auto while
the application really wanted manual. Although the aperture would still be
stuck at an uncontrollable value, at least it would be a static, usable value
rather than one varying under the AEGC algorithm.
With this solution, the earlier example would become:
::
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
| N+2 | | N+3 | | N+4 | | N+5 | | N+6 | | N+7 | | N+8 | | N+9 | | N+10|
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
Mode requested: Auto Manual Manual Manual Manual Manual Manual Manual Manual
Exp requested: N/A None None None None 10 None 10 10
Set in Frame: N+4 N+5 N+6 N+7 N+8 N+9 N+10 N+11 N+12
Mode used: Auto Auto Auto Manual Manual Manual Manual Manual Manual
Exp used: 2 3 4 5 5 5 5 10 10
This example is extended by a few frames to exhibit the simulated "locked"
state. At frame N+5 the application has confirmed that the manual mode has been
entered, but does not provide a manual value until request N+7. Thus, the value
that is used in requests N+5 and N+6 (where the mode is disabled), comes from
the last value that was used when the mode was auto, which comes from frame
N+4.
Then, in N+7, a manual value of 10 is supplied. It takes until frame N+9 for
the exposure to be applied. N+8 does not supply a manual value, but the last
supplied value is retained, so a manual value of 10 is still used and set in
frame N+10.
Although this behavior is the same as what we had with waiting for the manual
mode to propagate (in the section "Description of the problem"), this time it
is correct as we have defined specifically that if a manual value was specified
while the mode was auto, it will not be retained.
Description of the controls
---------------------------
As described above, libcamera offers the following controls related to exposure
and gain:
- AnalogueGain
- AnalogueGainMode
- ExposureTime
- ExposureTimeMode
- AeState
- AeEnable
Auto-exposure and auto-gain can be enabled and disabled separately using the
ExposureTimeMode and AnalogueGainMode controls respectively. The AeEnable
control can also be used, as it sets both of the modes simultaneously. The
AeEnable control is not returned in metadata.
When the respective mode is set to auto, the respective value that is computed
by the AEGC algorithm is applied to the image sensor. Any value that is
supplied in the manual ExposureTime/AnalogueGain control is ignored and not
retained. Another way to understand this is that when the mode transitions from
auto to manual, the internally stored control value is overwritten with the
last value computed by the auto algorithm.
This means that when we transition from auto to manual without supplying a
manual control value, the last value that was set by the AEGC algorithm will
keep being used. This can be used to do a flickerless transition from auto to
manual as described earlier. If the camera started out in manual mode and no
corresponding value has been supplied yet, then a best-effort default value
shall be set.
The manual control value can be set in the same request as setting the mode to
auto if the desired manual control value is already known.
Transitioning from manual to auto shall be implicitly flickerless, as the AEGC
algorithms are expected to start running from the last manual value.
The AeState metadata reports the state of the AE algorithm. As AE cannot
compute exposure and gain separately, the state of the AE component is
unified. There are three states: Idle, Searching, and Converged.
The state shall be Idle if both ExposureTimeMode and AnalogueGainMode
are set to Manual. If the camera only supports one of the two controls,
then the state shall be Idle if that one control is set to Manual. If
the camera does not support Manual for at least one of the two controls,
then the state will never be Idle, as AE will always be running.
The state shall be Searching if at least one of exposure or gain calculated
by the AE algorithm is used (that is, at least one of the two modes is Auto),
*and* the value(s) have not converged yet.
The state shall be Converged if at least one of exposure or gain calculated
by the AE algorithm is used (that is, at least one of the two modes is Auto),
*and* the value(s) have converged.
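For reference, the flickerless auto to manual transition described above could
look as follows from the application side. This is a minimal sketch, assuming
control identifiers matching the names used in this document; request
creation, queueing and completion handling are elided.

.. code-block:: cpp

   #include <libcamera/control_ids.h>
   #include <libcamera/controls.h>
   #include <libcamera/request.h>

   using namespace libcamera;

   /*
    * Step 1: request manual mode *without* supplying an exposure time. The
    * AEGC keeps applying its last computed value, so nothing flickers.
    */
   void switchToManualExposure(Request *request)
   {
           request->controls().set(controls::ExposureTimeMode,
                                   controls::ExposureTimeModeManual);
   }

   /*
    * Step 2: once a completed request reports manual mode in its metadata,
    * copy the exposure time it actually used into a new request. From this
    * point on the value is retained as usual.
    */
   void pinExposure(Request *request, Request *completed)
   {
           const auto exposure = completed->metadata().get(controls::ExposureTime);
           if (exposure)
                   request->controls().set(controls::ExposureTime, *exposure);
   }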

Documentation/docs.rst
View file

@@ -0,0 +1,400 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. contents::
:local:
*************
Documentation
*************
.. toctree::
:hidden:
API <api-html/index>
API
===
The libcamera API is extensively documented using Doxygen. The :ref:`API
nightly build <api>` contains the most up-to-date API documentation, built from
the latest master branch.
Feature Requirements
====================
Device enumeration
------------------
The library shall support enumerating all camera devices available in the
system, including both fixed cameras and hotpluggable cameras. It shall
support cameras plugged and unplugged after the initialization of the
library, and shall offer a mechanism to notify applications of camera plug
and unplug.
The following types of cameras shall be supported:
* Internal cameras designed for point-and-shoot still image and video
capture usage, either controlled directly by the CPU, or exposed through
an internal USB bus as a UVC device.
* External UVC cameras designed for video conferencing usage.
Other types of camera, including analog cameras, depth cameras, thermal
cameras, external digital picture or movie cameras, are out of scope for
this project.
A hardware device that includes independent camera sensors, such as front
and back sensors in a phone, shall be considered as multiple camera devices
for the purpose of this library.
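In libcamera's public API, the plug and unplug notification mentioned above
surfaces as signals on the camera manager. A minimal sketch, assuming the
``cameraAdded`` and ``cameraRemoved`` signals of ``libcamera::CameraManager``:

.. code-block:: cpp

   #include <iostream>
   #include <memory>

   #include <libcamera/camera.h>
   #include <libcamera/camera_manager.h>

   using namespace libcamera;

   int main()
   {
           CameraManager cm;
           cm.start();

           /* Called at runtime whenever a camera appears or disappears. */
           cm.cameraAdded.connect([](std::shared_ptr<Camera> cam) {
                   std::cout << "camera added: " << cam->id() << std::endl;
           });
           cm.cameraRemoved.connect([](std::shared_ptr<Camera> cam) {
                   std::cout << "camera removed: " << cam->id() << std::endl;
           });

           /* ... run the application's event loop ... */

           cm.stop();
           return 0;
   }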
Independent Camera Devices
--------------------------
When multiple cameras are present in the system and are able to operate
independently from each other, the library shall expose them as multiple
camera devices and support parallel operation without any additional usage
restriction apart from the limitations inherent to the hardware (such as
memory bandwidth, CPU usage or number of CSI-2 receivers for instance).
Independent processes shall be able to use independent camera devices
without interfering with each other. A single camera device shall be
usable by a single process at a time.
Multiple streams support
------------------------
The library shall support multiple video streams running in parallel
for each camera device, within the limits imposed by the system.
Per frame controls
------------------
The library shall support controlling capture parameters for each stream
on a per-frame basis, on a best effort basis based on the capabilities of the
hardware and underlying software stack (including kernel drivers and
firmware). It shall apply capture parameters to the frame they target, and
report the value of the parameters that have effectively been used for each
captured frame.
When a camera device supports multiple streams, the library shall allow both
control of each stream independently, and control of multiple streams
together. Streams that are controlled together shall be synchronized. No
synchronization is required for streams controlled independently.
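In the libcamera API, this per-frame model surfaces as a ``ControlList``
attached to each ``Request``. A minimal sketch, with illustrative control
values and all configuration and error handling elided:

.. code-block:: cpp

   #include <libcamera/camera.h>
   #include <libcamera/control_ids.h>
   #include <libcamera/request.h>

   using namespace libcamera;

   /* Attach capture parameters that target one specific frame. */
   void queueWithControls(Camera *camera, Request *request)
   {
           ControlList &ctrls = request->controls();
           ctrls.set(controls::ExposureTime, 10000); /* 10 ms, illustrative */
           ctrls.set(controls::AnalogueGain, 2.0f);

           /*
            * On completion, request->metadata() reports the parameter
            * values that were effectively used for the captured frame.
            */
           camera->queueRequest(request);
   }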
Capability Enumeration
----------------------
The library shall expose capabilities of each camera device in a way that
allows applications to discover those capabilities dynamically. Applications
shall be allowed to cache capabilities for as long as they are using the
library. If capabilities can change at runtime, the library shall offer a
mechanism to notify applications of such changes. Applications shall not
cache capabilities in long term storage between runs.
Capabilities shall be discovered dynamically at runtime from the device when
possible, and may come, in part or in full, from platform configuration
data.
Device Profiles
---------------
The library may define different camera device profiles, each with a minimum
set of required capabilities. Applications may use those profiles to quickly
determine the level of features exposed by a device without parsing the full
list of capabilities. Camera devices may implement additional capabilities
on top of the minimum required set for the profile they expose.
3A and Image Enhancement Algorithms
-----------------------------------
The camera devices shall implement auto exposure, auto gain and auto white
balance. Camera devices that include a focus lens shall implement auto
focus. Additional image enhancement algorithms, such as noise reduction or
video stabilization, may be implemented.
All algorithms may be implemented in hardware or firmware outside of the
library, or in software in the library. They shall all be controllable by
applications.
The library shall be architected to isolate the 3A and image enhancement
algorithms in a component with a documented API, respectively called the 3A
component and the 3A API. The 3A API shall be stable, and shall allow both
open-source and closed-source implementations of the 3A component.
The library may include statically-linked open-source 3A components, and
shall support dynamically-linked open-source and closed-source 3A
components.
Closed-source 3A Component Sandboxing
-------------------------------------
For security purposes, it may be desired to run closed-source 3A components
in a separate process. The 3A API would in such a case be transported over
IPC. The 3A API shall make it possible to use any IPC mechanism that
supports passing file descriptors.
The library may implement an IPC mechanism, and shall support third-party
platform-specific IPC mechanisms through the implementation of a
platform-specific 3A API wrapper. No modification to the library shall be
needed to use such third-party IPC mechanisms.
The 3A component shall not directly access any device node on the system.
Such accesses shall instead be performed through the 3A API. The library
shall validate all accesses and restrict them to what is absolutely required
by 3A components.
V4L2 Compatibility Layer
------------------------
The project shall support traditional V4L2 applications through an additional
libcamera wrapper library. The wrapper library shall trap all accesses to
camera devices through `LD_PRELOAD`, and route them through libcamera to
emulate a high-level V4L2 camera device. It shall expose camera device
features on a best-effort basis, and aim for the level of features
traditionally available from a UVC camera designed for video conferencing.
Android Camera HAL v3 Compatibility
-----------------------------------
The library API shall expose all the features required to implement an
Android Camera HAL v3 on top of libcamera. Some features of the HAL may be
omitted as long as they can be implemented separately in the HAL, such as
JPEG encoding, or YUV reprocessing.
Camera Stack
============
::
a c / +-------------+ +-------------+ +-------------+ +-------------+
p a | | Native | | Framework | | Native | | Android |
p t | | V4L2 | | Application | | libcamera | | Camera |
l i | | Application | | (gstreamer) | | Application | | Framework |
i o \ +-------------+ +-------------+ +-------------+ +-------------+
n ^ ^ ^ ^
| | | |
l a | | | |
i d v v | v
b a / +-------------+ +-------------+ | +-------------+
c p | | V4L2 | | Camera | | | Android |
a t | | Compat. | | Framework | | | Camera |
m a | | | | (gstreamer) | | | HAL |
e t \ +-------------+ +-------------+ | +-------------+
r i ^ ^ | ^
a o | | | |
n | | | |
/ | ,................................................
| | ! : Language : !
l f | | ! : Bindings : !
i r | | ! : (optional) : !
b a | | \...............................................'
c m | | | | |
a e | | | | |
m w | v v v v
e o | +----------------------------------------------------------------+
r r | | |
a k | | libcamera |
| | |
\ +----------------------------------------------------------------+
^ ^ ^
Userspace | | |
------------------------ | ---------------- | ---------------- | ---------------
Kernel | | |
v v v
+-----------+ +-----------+ +-----------+
| Media | <--> | Video | <--> | V4L2 |
| Device | | Device | | Subdev |
+-----------+ +-----------+ +-----------+
The camera stack comprises four software layers. From bottom to top:
* The kernel drivers control the camera hardware and expose a
low-level interface to userspace through the Linux kernel V4L2
family of APIs (Media Controller API, V4L2 Video Device API and
V4L2 Subdev API).
* The libcamera framework is the core part of the stack. It
handles all control of the camera devices in its core component,
libcamera, and exposes a native C++ API to upper layers. Optional
language bindings allow interfacing to libcamera from other
programming languages.
Those components live in the same source code repository and
all together constitute the libcamera framework.
* The libcamera adaptation is an umbrella term designating the
components that interface to libcamera in other frameworks.
Notable examples are a V4L2 compatibility layer, a gstreamer
libcamera element, and an Android camera HAL implementation based
on libcamera.
Those components can live in the libcamera project source code
in separate repositories, or move to their respective project's
repository (for instance the gstreamer libcamera element).
* The applications and upper level frameworks are based on the
libcamera framework or libcamera adaptation, and are outside of
the scope of the libcamera project.
libcamera Architecture
======================
::
---------------------------< libcamera Public API >---------------------------
^ ^
| |
v v
+-------------+ +-------------------------------------------------+
| Camera | | Camera Device |
| Devices | | +---------------------------------------------+ |
| Manager | | | Device-Agnostic | |
+-------------+ | | | |
^ | | +------------------------+ |
| | | | ~~~~~~~~~~~~~~~~~~~~~ |
| | | | { +---------------+ } |
| | | | } | ////Image//// | { |
| | | | <-> | /Processing// | } |
| | | | } | /Algorithms// | { |
| | | | { +---------------+ } |
| | | | ~~~~~~~~~~~~~~~~~~~~~ |
| | | | ======================== |
| | | | +---------------+ |
| | | | | //Pipeline/// | |
| | | | <-> | ///Handler/// | |
| | | | | ///////////// | |
| | +--------------------+ +---------------+ |
| | Device-Specific |
| +-------------------------------------------------+
| ^ ^
| | |
v v v
+--------------------------------------------------------------------+
| Helpers and Support Classes |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
| | Support | | Allocator | | IPC | | Manager | |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| +-------------+ +-------------+ |
| | Pipeline | | ... | |
| | Runner | | | |
| +-------------+ +-------------+ |
+--------------------------------------------------------------------+
/// Device-Specific Components
~~~ Sandboxing
While offering a unified API towards upper layers, and presenting
itself as a single library, libcamera isn't monolithic. It exposes
multiple components through its public API, is built around a set of
separate helpers internally, uses device-specific components and can
load dynamic plugins.
Camera Devices Manager
The Camera Devices Manager provides a view of available cameras
in the system. It performs cold enumeration and runtime camera
management, and supports a hotplug notification mechanism in its
public API.
To avoid the cost associated with cold enumeration of all devices
at application start, and to arbitrate concurrent access to camera
devices, the Camera Devices Manager could later be split to a
separate service, possibly with integration in platform-specific
device management.
Camera Device
The Camera Device represents a camera device to upper layers. It
exposes full control of the device through the public API, and is
thus the highest level object exposed by libcamera.
Camera Device instances are created by the Camera Devices
Manager. An optional function to create new instances could be exposed
through the public API to speed up initialization when the upper
layer knows how to directly address camera devices present in the
system.
Pipeline Handler
The Pipeline Handler manages complex pipelines exposed by the kernel drivers
through the Media Controller and V4L2 APIs. It abstracts pipeline handling to
hide device-specific details to the rest of the library, and implements both
pipeline configuration based on stream configuration, and pipeline runtime
execution and scheduling when needed by the device.
This component is device-specific and is part of the libcamera code base. As
such it is covered by the same free software license as the rest of libcamera
and needs to be contributed upstream by device vendors. The Pipeline Handler
lives in the same process as the rest of the library, and has access to all
helpers and kernel camera-related devices.
Image Processing Algorithms
Together with the hardware image processing and hardware statistics
collection, the Image Processing Algorithms implement 3A (Auto-Exposure,
Auto-White Balance and Auto-Focus) and other algorithms. They run on the CPU
and interact with the kernel camera devices to control hardware image
processing based on the parameters supplied by upper layers, closing the
control loop of the ISP.
This component is device-specific and is loaded as an external plugin. It can
be part of the libcamera code base, in which case it is covered by the same
license, or provided externally as an open-source or closed-source component.
The component is sandboxed and can only interact with libcamera through
internal APIs specifically marked as such. In particular it will have no
direct access to kernel camera devices, and all its accesses to image and
metadata will be mediated by dmabuf instances explicitly passed to the
component. The component must be prepared to run in a process separate from
the main libcamera process, and to have a very restricted view of the system,
including no access to networking APIs and limited access to file systems.
The sandboxing mechanism isn't defined by libcamera. One example
implementation will be provided as part of the project, and platforms vendors
will be able to provide their own sandboxing mechanism as a plugin.
libcamera should provide a basic implementation of Image Processing
Algorithms, to serve as a reference for the internal API. Device vendors are
expected to provide a full-fledged implementation compatible with their
Pipeline Handler. One goal of the libcamera project is to create an
environment in which the community will be able to compete with the
closed-source vendor binaries and develop a high quality open source
implementation.
Helpers and Support Classes
While Pipeline Handlers are device-specific, implementations are expected to
share code due to usage of identical APIs towards the kernel camera drivers
and the Image Processing Algorithms. This includes without limitation handling
of the MC and V4L2 APIs, buffer management through dmabuf, and pipeline
discovery, configuration and scheduling. Such code will be factored out to
helpers when applicable.
Other parts of libcamera will also benefit from factoring code out to
self-contained support classes, even if such code is present only once in the
code base, in order to keep the source code clean and easy to read. This
should be the case for instance for plugin management.
V4L2 Compatibility Layer
------------------------
V4L2 compatibility is achieved through a shared library that traps all
accesses to camera devices and routes them to libcamera to emulate high-level
V4L2 camera devices. It is injected in a process address space through
`LD_PRELOAD` and is completely transparent for applications.
The compatibility layer exposes camera device features on a best-effort basis,
and aims for the level of features traditionally available from a UVC camera
designed for video conferencing.
Android Camera HAL
------------------
Camera support for Android is achieved through a generic Android
camera HAL implementation on top of libcamera. The HAL will internally
implement features required by Android that are missing from libcamera, such
as JPEG encoding
support.
The Android camera HAL implementation will initially target the
LIMITED hardware level, with support for the FULL level then being gradually
implemented.

View file

@@ -1,35 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. container:: documentation-nav
* **Documentation for Users**
* :doc:`Introduction </introduction>`
* :doc:`/feature_requirements`
* :doc:`/guides/application-developer`
* :doc:`/python-bindings`
* :doc:`/environment_variables`
* :doc:`/api-html/index`
* :doc:`/code-of-conduct`
* |
* **Documentation for Developers**
* :doc:`/libcamera_architecture`
* :doc:`/guides/pipeline-handler`
* :doc:`/guides/ipa`
* :doc:`/camera-sensor-model`
* :doc:`/guides/tracing`
* :doc:`/software-isp-benchmarking`
* :doc:`/coding-style`
* :doc:`/internal-api-html/index`
* |
* **Documentation for System Integrators**
* :doc:`/lens_driver_requirements`
* :doc:`/sensor_driver_requirements`
..
The following directive adds the "documentation" class to all of the pages
generated by sphinx. This is not relevant in libcamera nor addressed in the
theme's CSS, since all of the pages here are documentation. It **is** used
to properly format the documentation pages on libcamera.org and so should not
be removed.
.. rst-class:: documentation

View file

@@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
Environment variables
=====================
@@ -39,29 +37,11 @@ LIBCAMERA_IPA_MODULE_PATH
Example value: ``${HOME}/.libcamera/lib:/opt/libcamera/vendor/lib``
LIBCAMERA_IPA_PROXY_PATH
Define custom full path for a proxy worker for a given executable name.
Example value: ``${HOME}/.libcamera/proxy/worker:/opt/libcamera/vendor/proxy/worker``
LIBCAMERA_PIPELINES_MATCH_LIST
Define an ordered list of pipeline names to be used to match the media
devices in the system. The pipeline handler names used to populate the
variable are the ones passed to the REGISTER_PIPELINE_HANDLER() macro in the
source code.
Example value: ``rkisp1,simple``
LIBCAMERA_RPI_CONFIG_FILE
Define a custom configuration file to use in the Raspberry Pi pipeline handler.
Example value: ``/usr/local/share/libcamera/pipeline/rpi/vc4/minimal_mem.yaml``
LIBCAMERA_<NAME>_TUNING_FILE
Define a custom IPA tuning file to use with the pipeline handler `NAME`.
Example value: ``/usr/local/share/libcamera/ipa/rpi/vc4/custom_sensor.json``
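These variables are read from the process environment, so they can also be set
programmatically before the camera manager starts. A trivial sketch using
POSIX ``setenv()``, reusing the example path from above:

.. code-block:: cpp

   #include <cstdlib>

   int main()
   {
           /* Must be set before libcamera reads the environment. */
           setenv("LIBCAMERA_RPI_CONFIG_FILE",
                  "/usr/local/share/libcamera/pipeline/rpi/vc4/minimal_mem.yaml",
                  1);

           /* ... start the CameraManager and run the application ... */
           return 0;
   }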
Further details
---------------

View file

@@ -1,150 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
Feature Requirements
====================
Device enumeration
------------------
The library shall support enumerating all camera devices available in the
system, including both fixed cameras and hotpluggable cameras. It shall
support cameras plugged and unplugged after the initialization of the
library, and shall offer a mechanism to notify applications of camera plug
and unplug.
The following types of cameras shall be supported:
* Internal cameras designed for point-and-shoot still image and video
capture usage, either controlled directly by the CPU, or exposed through
an internal USB bus as a UVC device.
* External UVC cameras designed for video conferencing usage.
Other types of camera, including analog cameras, depth cameras, thermal
cameras, external digital picture or movie cameras, are out of scope for
this project.
A hardware device that includes independent camera sensors, such as front
and back sensors in a phone, shall be considered as multiple camera devices
for the purpose of this library.
Independent Camera Devices
--------------------------
When multiple cameras are present in the system and are able to operate
independently from each other, the library shall expose them as multiple
camera devices and support parallel operation without any additional usage
restriction apart from the limitations inherent to the hardware (such as
memory bandwidth, CPU usage or number of CSI-2 receivers for instance).
Independent processes shall be able to use independent camera devices
without interfering with each other. A single camera device shall be
usable by a single process at a time.
Multiple streams support
------------------------
The library shall support multiple video streams running in parallel
for each camera device, within the limits imposed by the system.
Per frame controls
------------------
The library shall support controlling capture parameters for each stream
on a per-frame basis, on a best effort basis based on the capabilities of the
hardware and underlying software stack (including kernel drivers and
firmware). It shall apply capture parameters to the frame they target, and
report the value of the parameters that have effectively been used for each
captured frame.
When a camera device supports multiple streams, the library shall allow both
control of each stream independently, and control of multiple streams
together. Streams that are controlled together shall be synchronized. No
synchronization is required for streams controlled independently.
Capability Enumeration
----------------------
The library shall expose capabilities of each camera device in a way that
allows applications to discover those capabilities dynamically. Applications
shall be allowed to cache capabilities for as long as they are using the
library. If capabilities can change at runtime, the library shall offer a
mechanism to notify applications of such changes. Applications shall not
cache capabilities in long term storage between runs.
Capabilities shall be discovered dynamically at runtime from the device when
possible, and may come, in part or in full, from platform configuration
data.
Device Profiles
---------------
The library may define different camera device profiles, each with a minimum
set of required capabilities. Applications may use those profiles to quickly
determine the level of features exposed by a device without parsing the full
list of capabilities. Camera devices may implement additional capabilities
on top of the minimum required set for the profile they expose.
3A and Image Enhancement Algorithms
-----------------------------------
The library shall provide a basic implementation of Image Processing Algorithms
to serve as a reference for the internal API. This shall include auto exposure
and gain, and auto white balance. Camera devices that include a focus lens shall
implement auto focus. Additional image enhancement algorithms, such as noise
reduction or video stabilization, may be implemented. Device vendors are
expected to provide a fully-fledged implementation compatible with their
Pipeline Handler. One goal of the libcamera project is to create an environment
in which the community will be able to compete with the closed-source vendor
binaries and develop a high quality open source implementation.
All algorithms may be implemented in hardware or firmware outside of the
library, or in software in the library. They shall all be controllable by
applications.
The library shall be architected to isolate the 3A and image enhancement
algorithms in a component with a documented API, respectively called the 3A
component and the 3A API. The 3A API shall be stable, and shall allow both
open-source and closed-source implementations of the 3A component.
The library may include statically-linked open-source 3A components, and
shall support dynamically-linked open-source and closed-source 3A
components.
Closed-source 3A Component Sandboxing
-------------------------------------
For security purposes, it may be desired to run closed-source 3A components
in a separate process. The 3A API would in such a case be transported over
IPC. The 3A API shall make it possible to use any IPC mechanism that
supports passing file descriptors.
The library may implement an IPC mechanism, and shall support third-party
platform-specific IPC mechanisms through the implementation of a
platform-specific 3A API wrapper. No modification to the library shall be
needed to use such third-party IPC mechanisms.
The 3A component shall not directly access any device node on the system.
Such accesses shall instead be performed through the 3A API. The library
shall validate all accesses and restrict them to what is absolutely required
by 3A components.
V4L2 Compatibility Layer
------------------------
The project shall support traditional V4L2 applications through an additional
libcamera wrapper library. The wrapper library shall trap all accesses to
camera devices through `LD_PRELOAD`, and route them through libcamera to
emulate a high-level V4L2 camera device. It shall expose camera device
features on a best-effort basis, and aim for the level of features
traditionally available from a UVC camera designed for video conferencing.
Android Camera HAL v3 Compatibility
-----------------------------------
The library API shall expose all the features required to implement an
Android Camera HAL v3 on top of libcamera. Some features of the HAL may be
omitted as long as they can be implemented separately in the HAL, such as
JPEG encoding, or YUV reprocessing.

View file

@@ -1,46 +0,0 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2024, Google Inc.
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
# Generate Doxyfile from a template
import argparse
import os
import string
import sys
def fill_template(template, data):
    template = open(template, 'rb').read()
    template = template.decode('utf-8')
    template = string.Template(template)

    return template.substitute(data)


def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', dest='output', metavar='file',
                        type=argparse.FileType('w', encoding='utf-8'),
                        default=sys.stdout,
                        help='Output file name (default: standard output)')
    parser.add_argument('template', metavar='doxyfile.tmpl', type=str,
                        help='Doxyfile template')
    parser.add_argument('inputs', type=str, nargs='*',
                        help='Input files')
    args = parser.parse_args(argv[1:])

    inputs = [f'"{os.path.realpath(input)}"' for input in args.inputs]
    data = fill_template(args.template, {'inputs': (' \\\n' + ' ' * 25).join(inputs)})
    args.output.write(data)

    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))

View file

@ -1,5 +1,4 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. Getting started information is defined in the project README file.
.. include:: ../README.rst
:start-after: .. section-begin-getting-started

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: ../documentation-contents.rst
Using libcamera in a C++ application
====================================
@ -118,21 +116,19 @@ available.
.. code:: cpp
auto cameras = cm->cameras();
if (cameras.empty()) {
if (cm->cameras().empty()) {
std::cout << "No cameras were identified on the system."
<< std::endl;
cm->stop();
return EXIT_FAILURE;
}
std::string cameraId = cameras[0]->id();
std::string cameraId = cm->cameras()[0]->id();
camera = cm->get(cameraId);
/*
* Note that `camera` may not compare equal to `cameras[0]`.
* In fact, it might simply be a `nullptr`, as the particular
* device might have disappeared (and reappeared) in the meantime.
* Note that is equivalent to:
* camera = cm->cameras()[0];
*/
Once a camera has been selected an application needs to acquire an exclusive
@ -352,7 +348,7 @@ The libcamera library uses the concept of `signals and slots` (similar to `Qt
Signals and Slots`_) to connect events with callbacks to handle them.
.. _signals and slots: https://libcamera.org/api-html/classlibcamera_1_1Signal.html#details
.. _Qt Signals and Slots: https://doc.qt.io/qt-6/signalsandslots.html
.. _Qt Signals and Slots: https://doc.qt.io/qt-5/signalsandslots.html
The ``Camera`` device emits two signals that applications can connect to in
order to execute callbacks on frame completion events.
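As a minimal sketch (assuming a ``std::shared_ptr<Camera>`` named ``camera``,
and using a callback name of our own choosing), a free function can be
connected to the ``requestCompleted`` signal before the camera is started:

.. code-block:: cpp

   static void requestComplete(libcamera::Request *request)
   {
           /* Ignore requests cancelled while stopping the camera. */
           if (request->status() == libcamera::Request::RequestCancelled)
                   return;

           /* The completed buffers of the request can be processed here. */
   }

   camera->requestCompleted.connect(requestComplete);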
@ -483,7 +479,7 @@ instance. An example of how to write image data to disk is available in the
`FileSink class`_ which is a part of the ``cam`` utility application in the
libcamera repository.
.. _FileSink class: https://git.libcamera.org/libcamera/libcamera.git/tree/src/apps/cam/file_sink.cpp
.. _FileSink class: https://git.libcamera.org/libcamera/libcamera.git/tree/src/cam/file_sink.cpp
With the handling of this request completed, it is possible to re-use the
request and the associated buffers and re-queue it to the camera
@ -618,7 +614,7 @@ accordingly. In this example, the application file has been named
simple_cam = executable('simple-cam',
'simple-cam.cpp',
dependencies: dependency('libcamera'))
dependencies: dependency('libcamera', required : true))
The ``dependencies`` line instructs meson to ask ``pkgconfig`` (or ``cmake``) to
locate the ``libcamera`` library, which the test application will be

View file

@ -0,0 +1,319 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
Developers guide to libcamera
=============================
The Linux kernel handles multimedia devices through the 'Linux media' subsystem
and provides a set of APIs (application programming interfaces) known
collectively as V4L2 (`Video for Linux 2`_) and the `Media Controller`_ API
which provide an interface to interact with and control media devices.
Included in this subsystem are drivers for camera sensors, CSI2 (Camera
Serial Interface) receivers, and ISPs (Image Signal Processors).
The use of these drivers to provide a functioning camera stack is a
responsibility that lies in userspace, and is commonly implemented separately
by vendors without a common architecture or API for application developers.
libcamera provides a complete camera stack for Linux based systems to abstract
functionality desired by camera application developers and process the
configuration of hardware and image control algorithms required to obtain
desirable results from the camera.
.. _Video for Linux 2: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/v4l/v4l2.html
.. _Media Controller: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/mediactl/media-controller.html
In this developers guide, we will explore the `Camera Stack`_ and how it
can be visualised at a high level, and explore the internal `Architecture`_ of
the libcamera library with its components. The current `Platform Support`_ is
detailed, as well as an overview of the `Licensing`_ requirements of the
project.
This introduction is followed by walkthrough tutorials: newcomers wishing to
support a new platform can follow the `Pipeline Handler Writers Guide`_, while
the `Application Writers Guide`_ provides a tutorial of the key APIs exposed by
libcamera for those looking to make use of the native libcamera API.
.. _Pipeline Handler Writers Guide: pipeline-handler.html
.. _Application Writers Guide: application-developer.html
.. TODO: Correctly link to the other articles of the guide
Camera Stack
------------
The libcamera library is implemented in userspace, and makes use of underlying
kernel drivers that directly interact with hardware.
Applications can make use of libcamera through the native `libcamera API`_ or
through an adaptation layer integrating libcamera into a larger framework.
.. _libcamera API: https://www.libcamera.org/api-html/index.html
::
Application Layer
/ +--------------+ +--------------+ +--------------+ +--------------+
| | Native | | Framework | | Native | | Android |
| | V4L2 | | Application | | libcamera | | Camera |
| | Application | | (gstreamer) | | Application | | Framework |
\ +--------------+ +--------------+ +--------------+ +--------------+
^ ^ ^ ^
| | | |
| | | |
v v | v
Adaptation Layer |
/ +--------------+ +--------------+ | +--------------+
| | V4L2 | | gstreamer | | | Android |
| | Compatibility| | element | | | Camera |
| | (preload) | |(libcamerasrc)| | | HAL |
\ +--------------+ +--------------+ | +--------------+
|
^ ^ | ^
| | | |
| | | |
v v v v
libcamera Framework
/ +--------------------------------------------------------------------+
| | |
| | libcamera |
| | |
\ +--------------------------------------------------------------------+
^ ^ ^
Userspace | | |
--------------------- | ---------------- | ---------------- | ---------------
Kernel | | |
v v v
+-----------+ +-----------+ +-----------+
| Media | <--> | Video | <--> | V4L2 |
| Device | | Device | | Subdev |
+-----------+ +-----------+ +-----------+
The camera stack comprises four software layers. From bottom to top:
* The kernel drivers control the camera hardware and expose a low-level
interface to userspace through the Linux kernel V4L2 family of APIs
(Media Controller API, V4L2 Video Device API and V4L2 Subdev API).
* The libcamera framework is the core part of the stack. It handles all control
of the camera devices in its core component, libcamera, and exposes a native
C++ API to upper layers.
* The libcamera adaptation layer is an umbrella term designating the components
that interface to libcamera in other frameworks. Notable examples are the V4L2
compatibility layer, the gstreamer libcamera element, and the Android camera
HAL implementation based on libcamera, all of which are provided as part of
the libcamera project.
* The applications and upper level frameworks are based on the libcamera
framework or libcamera adaptation, and are outside of the scope of the
libcamera project; however, example native applications (cam, qcam) are
provided for testing.
V4L2 Compatibility Layer
V4L2 compatibility is achieved through a shared library that traps all
accesses to camera devices and routes them to libcamera to emulate high-level
V4L2 camera devices. It is injected in a process address space through
``LD_PRELOAD`` and is completely transparent to applications.
The compatibility layer exposes camera device features on a best-effort basis,
and aims for the level of features traditionally available from a UVC camera
designed for video conferencing.
Android Camera HAL
Camera support for Android is achieved through a generic Android camera HAL
implementation on top of libcamera. The HAL implements features required by
Android and out of scope from libcamera, such as JPEG encoding support.
This component is used to provide support for ChromeOS platforms.
GStreamer element (gstlibcamerasrc)
A `GStreamer element`_ is provided to allow capture from libcamera supported
devices through GStreamer pipelines, and connect to other elements for further
processing.
Development of this element is ongoing and is limited to a single stream.
Native libcamera API
Applications can make use of the libcamera API directly using the C++
API. An example application and walkthrough using the libcamera API can be
followed in the `Application Writers Guide`_.
.. _GStreamer element: https://gstreamer.freedesktop.org/documentation/application-development/basics/elements.html
Architecture
------------
While offering a unified API towards upper layers, and presenting itself as a
single library, libcamera isn't monolithic. It exposes multiple components
through its public API and is built around a set of separate helpers internally.
Hardware abstractions are handled through the use of device-specific components
where required and dynamically loadable plugins are used to separate image
processing algorithms from the core libcamera codebase.
::
--------------------------< libcamera Public API >---------------------------
^ ^
| |
v v
+-------------+ +---------------------------------------------------+
| Camera | | Camera Device |
| Manager | | +-----------------------------------------------+ |
+-------------+ | | Device-Agnostic | |
^ | | | |
| | | +--------------------------+ |
| | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
| | | | { +-----------------+ } |
| | | | } | //// Image //// | { |
| | | | <-> | / Processing // | } |
| | | | } | / Algorithms // | { |
| | | | { +-----------------+ } |
| | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
| | | | ========================== |
| | | | +-----------------+ |
| | | | | // Pipeline /// | |
| | | | <-> | /// Handler /// | |
| | | | | /////////////// | |
| | +--------------------+ +-----------------+ |
| | Device-Specific |
| +---------------------------------------------------+
| ^ ^
| | |
v v v
+--------------------------------------------------------------------+
| Helpers and Support Classes |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
| | Support | | Allocator | | IPC | | Manager | |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| +-------------+ +-------------+ |
| | Pipeline | | ... | |
| | Runner | | | |
| +-------------+ +-------------+ |
+--------------------------------------------------------------------+
/// Device-Specific Components
~~~ Sandboxing
Camera Manager
The Camera Manager enumerates cameras and instantiates Pipeline Handlers to
manage each Camera that libcamera supports. The Camera Manager supports
hotplug detection and notification events when supported by the underlying
kernel devices.
There is only ever one instance of the Camera Manager running per application.
Each application's instance of the Camera Manager ensures that only a single
application can take control of a camera device at once.
Read the `Camera Manager API`_ documentation for more details.
.. _Camera Manager API: https://libcamera.org/api-html/classlibcamera_1_1CameraManager.html
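As a brief sketch of this part of the public API (error handling elided for
clarity), an application creates a single ``CameraManager``, starts it, and
iterates over the cameras it has enumerated:

.. code-block:: cpp

   #include <iostream>
   #include <memory>

   #include <libcamera/libcamera.h>

   int main()
   {
           auto cm = std::make_unique<libcamera::CameraManager>();
           cm->start();

           /* Print the identifier of every enumerated camera. */
           for (const auto &camera : cm->cameras())
                   std::cout << camera->id() << std::endl;

           cm->stop();
           return 0;
   }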
Camera Device
The Camera class represents a single item of camera hardware that is capable
of producing one or more image streams, and provides the API to interact with
the underlying device.
If a system has multiple instances of the same hardware attached, each has its
own instance of the Camera class.
The API exposes full control of the device to upper layers of libcamera through
the public API, making it the highest level object libcamera exposes, and the
object that all other API operations interact with from configuration to
capture.
Read the `Camera API`_ documentation for more details.
.. _Camera API: https://libcamera.org/api-html/classlibcamera_1_1Camera.html
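Continuing the sketch above (again with error handling omitted, and choosing a
viewfinder stream purely as an example), a camera is looked up by identifier,
acquired for exclusive use and configured before capture, then released
afterwards:

.. code-block:: cpp

   std::shared_ptr<libcamera::Camera> camera =
           cm->get(cm->cameras()[0]->id());

   camera->acquire();

   /* Generate and validate a configuration for a viewfinder stream. */
   std::unique_ptr<libcamera::CameraConfiguration> config =
           camera->generateConfiguration({ libcamera::StreamRole::Viewfinder });
   config->validate();
   camera->configure(config.get());

   /* ... capture frames ... */

   camera->release();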
Pipeline Handler
The Pipeline Handler manages the complex pipelines exposed by the kernel
drivers through the Media Controller and V4L2 APIs. It abstracts pipeline
handling to hide device-specific details from the rest of the library, and
implements both pipeline configuration based on stream configuration, and
pipeline runtime execution and scheduling when needed by the device.
The Pipeline Handler lives in the same process as the rest of the library, and
has access to all helpers and kernel camera-related devices.
Hardware abstraction is handled by device-specific Pipeline Handlers, which
are derived from the Pipeline Handler base class, allowing commonality to be
shared among the implementations.
Derived pipeline handlers create Camera device instances based on the devices
they detect and support on the running system, and are responsible for
managing the interactions with a camera device.
More details can be found in the `PipelineHandler API`_ documentation, and the
`Pipeline Handler Writers Guide`_.
.. _PipelineHandler API: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html
Image Processing Algorithms
An image processing algorithm (IPA) component is a loadable plugin that
implements 3A (Auto-Exposure, Auto-White Balance, and Auto-Focus) and other
algorithms.
The algorithms run on the CPU and interact with the camera devices through the
Pipeline Handler to control hardware image processing based on the parameters
supplied by upper layers, maintaining state and closing the control loop
of the ISP.
The component is sandboxed and can only interact with libcamera through the
API provided by the Pipeline Handler; an IPA has no direct access to kernel
camera devices.
Open source IPA modules built with libcamera can be run in the same process
space as libcamera; however, external IPA modules are run in a separate process
from the main libcamera process. IPA modules have a restricted view of the
system, including no access to networking APIs and limited access to file
systems.
IPA modules are only required for platforms and devices with an ISP controlled
by the host CPU. Camera sensors which have an integrated ISP are not
controlled through the IPA module.
Platform Support
----------------
The library currently supports the following hardware platforms specifically
with dedicated pipeline handlers:
- Intel IPU3 (ipu3)
- Rockchip RK3399 (rkisp1)
- Raspberry Pi 3 and 4 (rpi/vc4)
Furthermore, generic platform support is provided for the following:
- USB video device class cameras (uvcvideo)
- iMX7, Allwinner Sun6i (simple)
- Virtual media controller driver for test use cases (vimc)
Licensing
---------
The libcamera core is covered by the `LGPL-2.1-or-later`_ license. Pipeline
Handlers are a part of the libcamera code base and need to be contributed
upstream by device vendors. IPA modules included in libcamera are covered by a
free software license, however third-parties may develop IPA modules outside of
libcamera and distribute them under a closed-source license, provided they do
not include source code from the libcamera project.
The libcamera project itself contains multiple libraries, applications and
utilities. Licenses are expressed through SPDX tags in text-based files that
support comments, and through the .reuse/dep5 file otherwise. Copies of all
licenses are stored in the LICENSES directory, and a full summary of the
licensing used throughout the project can be found in the COPYING.rst document.
Applications which link dynamically against libcamera and use only the public
API are an independent work of the authors and have no license restrictions
imposed upon them by libcamera.
.. _LGPL-2.1-or-later: https://spdx.org/licenses/LGPL-2.1-or-later.html

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: ../documentation-contents.rst
IPA Writer's Guide
==================

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: ../documentation-contents.rst
Pipeline Handler Writers Guide
==============================
@ -153,14 +151,13 @@ integrates with the libcamera build system, and a *vivid.cpp* file that matches
the name of the pipeline.
In the *meson.build* file, add the *vivid.cpp* file as a build source for
libcamera by adding it to the global meson ``libcamera_internal_sources``
variable:
libcamera by adding it to the global meson ``libcamera_sources`` variable:
.. code-block:: none
# SPDX-License-Identifier: CC0-1.0
libcamera_internal_sources += files([
libcamera_sources += files([
'vivid.cpp',
])
@ -186,7 +183,7 @@ to the libcamera build options in the top level ``meson_options.txt``.
option('pipelines',
type : 'array',
choices : ['ipu3', 'rkisp1', 'rpi/pisp', 'rpi/vc4', 'simple', 'uvcvideo', 'vimc', 'vivid'],
choices : ['ipu3', 'rkisp1', 'rpi/vc4', 'simple', 'uvcvideo', 'vimc', 'vivid'],
description : 'Select which pipeline handlers to include')
@ -213,7 +210,7 @@ implementations for the overridden class members.
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
int start(Camera *camera, const ControlList *controls) override;
void stopDevice(Camera *camera) override;
void stop(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
@ -247,7 +244,7 @@ implementations for the overridden class members.
return -1;
}
void PipelineHandlerVivid::stopDevice(Camera *camera)
void PipelineHandlerVivid::stop(Camera *camera)
{
}
@ -261,7 +258,7 @@ implementations for the overridden class members.
return false;
}
REGISTER_PIPELINE_HANDLER(PipelineHandlerVivid, "vivid")
REGISTER_PIPELINE_HANDLER(PipelineHandlerVivid)
} /* namespace libcamera */
@ -269,8 +266,6 @@ Note that you must register the ``PipelineHandler`` subclass with the pipeline
handler factory using the `REGISTER_PIPELINE_HANDLER`_ macro, which
registers it and creates a global symbol to reference the class and make it
available to try and match devices.
String "vivid" is the name assigned to the pipeline, matching the pipeline
subdirectory name in the source tree.
.. _REGISTER_PIPELINE_HANDLER: https://libcamera.org/api-html/pipeline__handler_8h.html
@ -521,14 +516,14 @@ handler and camera manager using `registerCamera`_.
Finally with a successful construction, we return 'true' indicating that the
PipelineHandler successfully matched and constructed a device.
.. _Camera::create: https://libcamera.org/internal-api-html/classlibcamera_1_1Camera.html#adf5e6c22411f953bfaa1ae21155d6c31
.. _Camera::create: https://libcamera.org/api-html/classlibcamera_1_1Camera.html#a453740e0d2a2f495048ae307a85a2574
.. _registerCamera: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html#adf02a7f1bbd87aca73c0e8d8e0e6c98b
.. code-block:: cpp
std::set<Stream *> streams{ &data->stream_ };
std::shared_ptr<Camera> camera = Camera::create(std::move(data), data->video_->deviceName(), streams);
registerCamera(std::move(camera));
std::shared_ptr<Camera> camera = Camera::create(this, data->video_->deviceName(), streams);
registerCamera(std::move(camera), std::move(data));
return true;
@ -554,7 +549,8 @@ Our match function should now look like the following:
/* Create and register the camera. */
std::set<Stream *> streams{ &data->stream_ };
std::shared_ptr<Camera> camera = Camera::create(std::move(data), data->video_->deviceName(), streams);
const std::string &id = data->video_->deviceName();
std::shared_ptr<Camera> camera = Camera::create(data.release(), id, streams);
registerCamera(std::move(camera));
return true;
@ -591,12 +587,12 @@ immutable properties of the ``Camera`` device.
The libcamera controls and properties are defined in YAML form which is
processed to automatically generate documentation and interfaces. Controls are
defined by the src/libcamera/`control_ids_core.yaml`_ file and camera properties
are defined by src/libcamera/`property_ids_core.yaml`_.
defined by the src/libcamera/`control_ids.yaml`_ file and camera properties
are defined by src/libcamera/`properties_ids.yaml`_.
.. _controls framework: https://libcamera.org/api-html/controls_8h.html
.. _control_ids_core.yaml: https://libcamera.org/api-html/control__ids_8h.html
.. _property_ids_core.yaml: https://libcamera.org/api-html/property__ids_8h.html
.. _control_ids.yaml: https://libcamera.org/api-html/control__ids_8h.html
.. _properties_ids.yaml: https://libcamera.org/api-html/property__ids_8h.html
Pipeline handlers can optionally register the list of controls an application
can set as well as a list of immutable camera properties. Being both
@ -655,7 +651,7 @@ inline in our VividCameraData init:
ctrls.emplace(id, info);
}
controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls);
controlInfo_ = std::move(ctrls);
The ``properties_`` field is a list of ``ControlId`` instances
associated with immutable values, which represent static characteristics that can
@ -676,58 +672,6 @@ handling controls:
#include <libcamera/controls.h>
#include <libcamera/control_ids.h>
Vendor-specific controls and properties
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Vendor-specific controls and properties must be defined in a separate YAML file
and included in the build by defining the pipeline handler to file mapping in
include/libcamera/meson.build. These YAML files live in the src/libcamera
directory.
For example, adding a Raspberry Pi vendor control file for the PiSP pipeline
handler is done with the following mapping:
.. code-block:: meson
controls_map = {
'controls': {
'draft': 'control_ids_draft.yaml',
'libcamera': 'control_ids_core.yaml',
'rpi/pisp': 'control_ids_rpi.yaml',
},
'properties': {
'draft': 'property_ids_draft.yaml',
'libcamera': 'property_ids_core.yaml',
}
}
The pipeline handler named above must match the pipeline handler option string
specified in the meson build configuration.
Vendor-specific controls and properties must contain a `vendor: <vendor_string>`
tag in the YAML file. Every unique vendor tag must define a unique and
non-overlapping range of reserved control IDs in src/libcamera/control_ranges.yaml.
For example, the following block defines a vendor-specific control with the
`rpi` vendor tag:
.. code-block:: yaml
vendor: rpi
controls:
- PispConfigDumpFile:
type: string
description: |
Triggers the Raspberry Pi PiSP pipeline handler to generate a JSON
formatted dump of the Backend configuration to the filename given by the
value of the control.
The controls will be generated in the vendor-specific namespace
`libcamera::controls::rpi`. Additionally, a `#define
LIBCAMERA_HAS_RPI_VENDOR_CONTROLS` will be available to allow applications to
test for the availability of these controls.
Generating a default configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -799,7 +743,8 @@ derived class, and assign it to a base class pointer.
.. code-block:: cpp
auto config = std::make_unique<VividCameraConfiguration>();
VividCameraData *data = cameraData(camera);
CameraConfiguration *config = new VividCameraConfiguration();
A ``CameraConfiguration`` is specific to each pipeline, so you can only create
it from the pipeline handler code path. Applications can also generate an empty
@ -827,7 +772,9 @@ To generate a ``StreamConfiguration``, you need a list of pixel formats and
frame sizes which are supported as outputs of the stream. You can fetch a map of
the ``V4L2PixelFormat`` and ``SizeRange`` supported by the underlying output
device, but the pipeline handler needs to convert this to a
``libcamera::PixelFormat`` type to pass to applications.
``libcamera::PixelFormat`` type to pass to applications. We do this here using
``std::transform`` to convert the formats and populate a new ``PixelFormat`` map
as shown below.
Continue adding the following code example to our ``generateConfiguration``
implementation.
@ -837,12 +784,14 @@ implementation.
std::map<V4L2PixelFormat, std::vector<SizeRange>> v4l2Formats =
data->video_->formats();
std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
for (auto &[v4l2PixelFormat, sizes] : v4l2Formats) {
PixelFormat pixelFormat = v4l2PixelFormat.toPixelFormat();
if (pixelFormat.isValid())
deviceFormats.try_emplace(pixelFormat, std::move(sizes));
}
std::transform(v4l2Formats.begin(), v4l2Formats.end(),
std::inserter(deviceFormats, deviceFormats.begin()),
[&](const decltype(v4l2Formats)::value_type &format) {
return decltype(deviceFormats)::value_type{
format.first.toPixelFormat(),
format.second
};
});
The `StreamFormats`_ class holds information about the pixel formats and frame
sizes that a stream can support. The class groups size information by the pixel
@ -932,9 +881,9 @@ Add the following function implementation to your file:
StreamConfiguration &cfg = config_[0];
const std::vector<libcamera::PixelFormat> &formats = cfg.formats().pixelformats();
const std::vector<libcamera::PixelFormat> formats = cfg.formats().pixelformats();
if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) == formats.end()) {
cfg.pixelFormat = formats[0];
cfg.pixelFormat = cfg.formats().pixelformats()[0];
LOG(VIVID, Debug) << "Adjusting format to " << cfg.pixelFormat.toString();
status = Adjusted;
}
@ -1152,7 +1101,7 @@ available to the devices which have to be started and ready to produce
images. At the end of a capture session the ``Camera`` device needs to be
stopped, to gracefully clean up any allocated memory and stop the hardware
devices. Pipeline handlers implement two functions for these purposes, the
``start()`` and ``stopDevice()`` functions.
``start()`` and ``stop()`` functions.
The memory initialization phase that happens at ``start()`` time serves to
configure video devices to be able to use memory buffers exported as dma-buf
@ -1255,8 +1204,8 @@ algorithms, or other devices you should also stop them.
.. _releaseBuffers: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#a191619c152f764e03bc461611f3fcd35
Of course we also need to handle the corresponding actions to stop streaming on
a device. Add the following to the ``stopDevice()`` function, to stop the
stream with the `streamOff`_ function and release all buffers.
a device. Add the following to the ``stop`` function, to stop the stream with
the `streamOff`_ function and release all buffers.
.. _streamOff: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#a61998710615bdf7aa25a046c8565ed66
@ -1344,7 +1293,7 @@ before being set.
continue;
}
int32_t value = std::lround(it.second.get<float>() * 128 + offset);
int32_t value = lroundf(it.second.get<float>() * 128 + offset);
controls.set(cid, std::clamp(value, 0, 255));
}
@ -1408,7 +1357,7 @@ value translation operations:
.. code-block:: cpp
#include <cmath>
#include <math.h>
Frame completion and event handling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -1421,7 +1370,7 @@ emitted triggers the execution of the connected slots. A detailed description
of the libcamera implementation is available in the `libcamera Signal and Slot`_
classes documentation.
.. _Qt Signals and Slots: https://doc.qt.io/qt-6/signalsandslots.html
.. _Qt Signals and Slots: https://doc.qt.io/qt-5/signalsandslots.html
.. _libcamera Signal and Slot: https://libcamera.org/api-html/classlibcamera_1_1Signal.html#details
In order to notify applications about the availability of new frames and data,

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: ../documentation-contents.rst
Tracing Guide
=============

View file

@ -1,132 +0,0 @@
[rotate0.svg — deleted Inkscape SVG illustration, 297mm × 210mm page, 4.8 KiB; full SVG source omitted]

View file

@ -1,135 +0,0 @@
[rotate0Mirror.svg — deleted Inkscape SVG illustration, 297mm × 210mm page, 4.9 KiB; full SVG source omitted]

View file

@ -1,135 +0,0 @@
[rotate180.svg — deleted Inkscape SVG illustration, 297mm × 210mm page, 4.9 KiB; full SVG source omitted]

View file

@ -1,135 +0,0 @@
[rotate180Mirror.svg — deleted Inkscape SVG illustration, 297mm × 210mm page, 4.9 KiB; full SVG source omitted]

View file

@ -1,135 +0,0 @@
[rotate270.svg — deleted Inkscape SVG illustration, 297mm × 210mm page, 4.9 KiB; full SVG source omitted]

View file

@ -1,135 +0,0 @@
[rotate270Mirror.svg — deleted Inkscape SVG illustration, 297mm × 210mm page, 4.9 KiB; full SVG source omitted]

View file

@ -1,135 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
width="297mm"
height="210mm"
viewBox="0 0 297 210"
version="1.1"
id="svg1"
inkscape:version="1.3 (0e150ed6c4, 2023-07-21)"
sodipodi:docname="rotate90.svg"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<sodipodi:namedview
id="namedview1"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:showpageshadow="2"
inkscape:pageopacity="0.0"
inkscape:pagecheckerboard="0"
inkscape:deskcolor="#d1d1d1"
inkscape:document-units="mm"
showgrid="true"
showguides="false"
inkscape:zoom="0.94272086"
inkscape:cx="467.26451"
inkscape:cy="423.24299"
inkscape:window-width="1916"
inkscape:window-height="1040"
inkscape:window-x="0"
inkscape:window-y="38"
inkscape:window-maximized="1"
inkscape:current-layer="layer1">
<inkscape:grid
id="grid1"
units="px"
originx="0"
originy="0"
spacingx="0.26458334"
spacingy="0.26458333"
empcolor="#0000ff"
empopacity="0.25098039"
color="#0000ff"
opacity="0.1254902"
empspacing="5"
dotted="false"
gridanglex="30"
gridanglez="30"
visible="true" />
</sodipodi:namedview>
<defs
id="defs1" />
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1">
<rect
style="fill:none;stroke:#000000;stroke-width:1.5875;stroke-dasharray:none;paint-order:markers stroke fill"
id="rect1"
width="152.88184"
height="119.41136"
x="34.65498"
y="-182.80751"
transform="rotate(90)" />
<rect
style="fill:none;stroke:#000000;stroke-width:1.5875;stroke-dasharray:none;paint-order:markers stroke fill"
id="rect2"
width="49.755535"
height="36.468258"
x="50.030079"
y="-165.87665"
transform="rotate(90)" />
<rect
style="fill:none;stroke:#000000;stroke-width:1.5875;stroke-dasharray:none;paint-order:markers stroke fill"
id="rect2-5"
width="49.755535"
height="36.468258"
x="124.66872"
y="-165.87665"
transform="rotate(90)" />
<g
id="g4"
transform="rotate(90,154.17885,110.61076)"
style="stroke-width:1.5875;stroke-dasharray:none">
<rect
style="fill:none;stroke:#000000;stroke-width:1.5875;stroke-dasharray:none;paint-order:markers stroke fill"
id="rect3"
width="40.994682"
height="43.605846"
x="134.16664"
y="157.24184" />
<ellipse
style="fill:none;stroke:#000000;stroke-width:1.5875;stroke-dasharray:none;paint-order:markers stroke fill"
id="path3"
cx="140.15703"
cy="176.44627"
rx="1.889045"
ry="1.925626" />
</g>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.5875;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 183.02388,202.37715 -0.20636,-182.51459 53.31576,91.81156 z"
id="path4"
sodipodi:nodetypes="cccc" />
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.5875;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 231.42072,157.18524 -21.58135,0.0285"
id="path5" />
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.5875;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 231.6004,173.00789 -31.3303,0.0959"
id="path6" />
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.5875;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 231.60025,151.84608 0.22061,25.2821"
id="path7" />
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.5875;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 231.69327,152.61021 5.37579,-0.0701 0.0561,23.77787 -5.31581,0.0553"
id="path8" />
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.5875;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 239.17334,151.62647 0.0254,25.25485"
id="path7-5"
sodipodi:nodetypes="cc" />
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.5875;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 238.49104,152.45209 5.42669,-0.0455 0.0561,23.77787 -5.31581,0.0553"
id="path8-9"
sodipodi:nodetypes="cccc" />
</g>
</svg>


View file

@ -1,135 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
width="297mm"
height="210mm"
viewBox="0 0 297 210"
version="1.1"
id="svg1"
inkscape:version="1.3 (0e150ed6c4, 2023-07-21)"
sodipodi:docname="rotate90Mirror.svg"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<sodipodi:namedview
id="namedview1"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:showpageshadow="2"
inkscape:pageopacity="0.0"
inkscape:pagecheckerboard="0"
inkscape:deskcolor="#d1d1d1"
inkscape:document-units="mm"
showgrid="true"
showguides="false"
inkscape:zoom="0.94272086"
inkscape:cx="467.79489"
inkscape:cy="423.24299"
inkscape:window-width="1916"
inkscape:window-height="1040"
inkscape:window-x="0"
inkscape:window-y="38"
inkscape:window-maximized="1"
inkscape:current-layer="layer1">
<inkscape:grid
id="grid1"
units="px"
originx="0"
originy="0"
spacingx="0.26458334"
spacingy="0.26458333"
empcolor="#0000ff"
empopacity="0.25098039"
color="#0000ff"
opacity="0.1254902"
empspacing="5"
dotted="false"
gridanglex="30"
gridanglez="30"
visible="true" />
</sodipodi:namedview>
<defs
id="defs1" />
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1">
<rect
style="fill:none;stroke:#000000;stroke-width:1.5875;stroke-dasharray:none;paint-order:markers stroke fill"
id="rect1"
width="152.88184"
height="119.41136"
x="34.65498"
y="124.56432"
transform="matrix(0,1,1,0,0,0)" />
<rect
style="fill:none;stroke:#000000;stroke-width:1.5875;stroke-dasharray:none;paint-order:markers stroke fill"
id="rect2"
width="49.755535"
height="36.468258"
x="50.030079"
y="141.49519"
transform="matrix(0,1,1,0,0,0)" />
<rect
style="fill:none;stroke:#000000;stroke-width:1.5875;stroke-dasharray:none;paint-order:markers stroke fill"
id="rect2-5"
width="49.755535"
height="36.468258"
x="124.66872"
y="141.49519"
transform="matrix(0,1,1,0,0,0)" />
<g
id="g4"
transform="matrix(0,1,1,0,42.582224,-43.56809)"
style="stroke-width:1.5875;stroke-dasharray:none">
<rect
style="fill:none;stroke:#000000;stroke-width:1.5875;stroke-dasharray:none;paint-order:markers stroke fill"
id="rect3"
width="40.994682"
height="43.605846"
x="134.16664"
y="157.24184" />
<ellipse
style="fill:none;stroke:#000000;stroke-width:1.5875;stroke-dasharray:none;paint-order:markers stroke fill"
id="path3"
cx="140.15703"
cy="176.44627"
rx="1.889045"
ry="1.925626" />
</g>
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.5875;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="M 124.34795,202.37715 124.55431,19.86256 71.238554,111.67412 Z"
id="path4"
sodipodi:nodetypes="cccc" />
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.5875;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 75.951114,157.18524 21.58135,0.0285"
id="path5" />
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.5875;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 75.771434,173.00789 31.330296,0.0959"
id="path6" />
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.5875;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 75.771584,151.84608 -0.22061,25.2821"
id="path7" />
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.5875;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 75.678564,152.61021 -5.37579,-0.0701 -0.0561,23.77787 5.31581,0.0553"
id="path8" />
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.5875;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 68.198494,151.62647 -0.0254,25.25485"
id="path7-5"
sodipodi:nodetypes="cc" />
<path
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:1.5875;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 68.880794,152.45209 -5.42669,-0.0455 -0.0561,23.77787 5.31581,0.0553"
id="path8-9"
sodipodi:nodetypes="cccc" />
</g>
</svg>


View file

@ -1,31 +1,25 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: introduction.rst
.. Front page matter is defined in the project README file.
.. include:: ../README.rst
:start-after: .. section-begin-libcamera
:end-before: .. section-end-libcamera
.. toctree::
:maxdepth: 1
:caption: Contents:
Home <self>
Docs <docs>
Contribute <contributing>
Getting Started <getting-started>
Developer Guide <guides/introduction>
Application Writer's Guide <guides/application-developer>
Camera Sensor Model <camera-sensor-model>
Environment variables <environment_variables>
Feature Requirements <feature_requirements>
IPA Writer's guide <guides/ipa>
Lens driver requirements <lens_driver_requirements>
libcamera Architecture <libcamera_architecture>
Pipeline Handler Writer's Guide <guides/pipeline-handler>
Python Bindings <python-bindings>
Sensor driver requirements <sensor_driver_requirements>
SoftwareISP Benchmarking <software-isp-benchmarking>
IPA Writer's guide <guides/ipa>
Tracing guide <guides/tracing>
Design document: AE <design/ae>
.. toctree::
:hidden:
introduction
Environment variables <environment_variables>
Sensor driver requirements <sensor_driver_requirements>
Lens driver requirements <lens_driver_requirements>
Python Bindings <python-bindings>

View file

@ -1,8 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. _internal-api:
Internal API Reference
======================
:: Placeholder for Doxygen documentation

View file

@ -1,224 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
************
Introduction
************
.. toctree::
:hidden:
API <api-html/index>
Internal API <internal-api-html/index>
What is libcamera?
==================
libcamera is an open source complex camera support library for Linux, Android
and ChromeOS. The library interfaces with Linux kernel device drivers and
provides an intuitive API to developers in order to simplify the complexity
involved in capturing images from complex cameras on Linux systems.
What is a "complex camera"?
===========================
A modern "camera" tends to infact be several different pieces of hardware which
must all be controlled together in order to produce and capture images of
appropriate quality. A hardware pipeline typically consists of a camera sensor
that captures raw frames and transmits them on a bus, a receiver that decodes
the bus signals, and an image signal processor that processes raw frames to
produce usable images in a standard format. The Linux kernel handles these
multimedia devices through the 'Linux media' subsystem and provides a set of
application programming interfaces known collectively as the
V4L2 (`Video for Linux 2`_) and the `Media Controller`_ APIs, which provide an
interface to interact and control media devices.
.. _Video for Linux 2: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/v4l/v4l2.html
.. _Media Controller: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/mediactl/media-controller.html
Included in this subsystem are drivers for camera sensors, CSI2 (Camera
Serial Interface) receivers, and ISPs (Image Signal Processors).
The usage of these drivers to provide a functioning camera stack is a
responsibility that lies in userspace, and is commonly implemented separately
by vendors without a common architecture or API for application developers. This
adds a lot of complexity to the task, particularly when considering that the
differences in hardware pipelines and their representation in the kernel's APIs
often necessitate bespoke handling.
What is libcamera for?
======================
libcamera provides a complete camera stack for Linux-based systems to abstract
the configuration of hardware and image control algorithms required to obtain
desirable results from the camera through the kernel's APIs, reducing those
operations to a simple and consistent method for developers. In short instead of
having to deal with this:
.. graphviz:: mali-c55.dot
you can instead simply deal with:
.. code-block:: python
>>> import libcamera as lc
>>> camera_manager = lc.CameraManager.singleton()
[0:15:59.582029920] [504] INFO Camera camera_manager.cpp:313 libcamera v0.3.0+182-01e57380
>>> for camera in camera_manager.cameras:
... print(f' - {camera.id}')
...
- mali-c55 tpg
- imx415 1-001a
The library handles the rest for you. These documentation pages give more
information on the internal workings of libcamera (and the kernel camera stack
that lies behind it) as well as guidance on using libcamera in an application or
extending the library with support for your hardware (through the pipeline
handler and IPA module writer's guides).
How should I use it?
====================
There are a few ways you might want to use libcamera, depending on your
application. It's always possible to use the library directly, and you can find
detailed information on how to do so in the
:doc:`application writer's guide <guides/application-developer>`.
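
For a first orientation, the following is a minimal sketch of direct use of the
C++ API, with all error handling elided; the guide walks through each of these
steps in detail.

.. code-block:: cpp

   #include <memory>

   #include <libcamera/libcamera.h>

   using namespace libcamera;

   int main()
   {
           /* Start the camera manager, which enumerates the cameras. */
           auto cm = std::make_unique<CameraManager>();
           cm->start();

           if (cm->cameras().empty()) {
                   cm->stop();
                   return 1;
           }

           /* Acquire exclusive access to a camera before using it. */
           std::shared_ptr<Camera> camera = cm->cameras()[0];
           camera->acquire();

           /* ... configure streams and queue capture requests ... */

           camera->release();
           cm->stop();

           return 0;
   }
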
It is often more appropriate to use one of the frameworks with libcamera
support. For example an application powering an embedded media device
incorporating capture, encoding and streaming of both video and audio would
benefit from using `GStreamer`_, for which libcamera provides a plugin.
Similarly, an application for user-facing devices like a laptop would likely
benefit from accessing cameras through the XDG camera portal and `pipewire`_, which
brings the advantages of resource sharing (multiple applications accessing the
stream at the same time) and access control.
.. _GStreamer: https://gstreamer.freedesktop.org/
.. _pipewire: https://pipewire.org/
Camera Stack
============
::
a c / +-------------+ +-------------+ +-------------+ +-------------+
p a | | Native | | Framework | | Native | | Android |
p t | | V4L2 | | Application | | libcamera | | Camera |
l i | | Application | | (gstreamer) | | Application | | Framework |
i o \ +-------------+ +-------------+ +-------------+ +-------------+
n ^ ^ ^ ^
| | | |
l a | | | |
i d v v | v
b a / +-------------+ +-------------+ | +-------------+
c p | | V4L2 | | Camera | | | Android |
a t | | Compat. | | Framework | | | Camera |
m a | | | | (gstreamer) | | | HAL |
e t \ +-------------+ +-------------+ | +-------------+
r i ^ ^ | ^
a o | | | |
n | | | |
/ | ,................................................
| | ! : Language : !
l f | | ! : Bindings : !
i r | | ! : (optional) : !
b a | | \...............................................'
c m | | | | |
a e | | | | |
m w | v v v v
e o | +----------------------------------------------------------------+
r r | | |
a k | | libcamera |
| | |
\ +----------------------------------------------------------------+
^ ^ ^
Userspace | | |
------------------------ | ---------------- | ---------------- | ---------------
Kernel | | |
v v v
+-----------+ +-----------+ +-----------+
| Media | <--> | Video | <--> | V4L2 |
| Device | | Device | | Subdev |
+-----------+ +-----------+ +-----------+
The camera stack comprises four software layers. From bottom to top:
* The kernel drivers control the camera hardware and expose a
low-level interface to userspace through the Linux kernel V4L2
family of APIs (Media Controller API, V4L2 Video Device API and
V4L2 Subdev API).
* The libcamera framework is the core part of the stack. It
handles all control of the camera devices in its core component,
libcamera, and exposes a native C++ API to upper layers. Optional
language bindings allow interfacing to libcamera from other
programming languages.
Those components live in the same source code repository and
all together constitute the libcamera framework.
* The libcamera adaptation is an umbrella term designating the
components that interface to libcamera in other frameworks.
Notable examples are a V4L2 compatibility layer, a gstreamer
libcamera element, and an Android camera HAL implementation based
on libcamera.
Those components can live in the libcamera project source code
in separate repositories, or move to their respective project's
repository (for instance the gstreamer libcamera element).
* The applications and upper level frameworks are based on the
libcamera framework or libcamera adaptation, and are outside of
the scope of the libcamera project.
V4L2 Compatibility Layer
V4L2 compatibility is achieved through a shared library that traps all
accesses to camera devices and routes them to libcamera to emulate high-level
V4L2 camera devices. It is injected in a process address space through
``LD_PRELOAD`` and is completely transparent for applications.
The compatibility layer exposes camera device features on a best-effort basis,
and aims for the level of features traditionally available from a UVC camera
designed for video conferencing.
Android Camera HAL
Camera support for Android is achieved through a generic Android camera HAL
implementation on top of libcamera. The HAL implements features required by
Android and out of scope from libcamera, such as JPEG encoding support.
This component is used to provide support for ChromeOS platforms.
GStreamer element (gstlibcamerasrc)
A `GStreamer element`_ is provided to allow capture from libcamera supported
devices through GStreamer pipelines, and connect to other elements for further
processing.
Native libcamera API
Applications can make use of the libcamera API directly using the C++
API. An example application and walkthrough using the libcamera API can be
followed in the :doc:`Application writer's guide </guides/application-developer>`
.. _GStreamer element: https://gstreamer.freedesktop.org/documentation/application-development/basics/elements.html
Licensing
=========
The libcamera core is covered by the `LGPL-2.1-or-later`_ license. Pipeline
Handlers are a part of the libcamera code base and need to be contributed
upstream by device vendors. IPA modules included in libcamera are covered by a
free software license, however third-parties may develop IPA modules outside of
libcamera and distribute them under a closed-source license, provided they do
not include source code from the libcamera project.
The libcamera project itself contains multiple libraries, applications and
utilities. Licenses are expressed through SPDX tags in text-based files that
support comments, and through the .reuse/dep5 file otherwise. A copy of all
licenses are stored in the LICENSES directory, and a full summary of the
licensing used throughout the project can be found in the COPYING.rst document.
Applications which link dynamically against libcamera and use only the public
API are an independent work of the authors and have no license restrictions
imposed upon them from libcamera.
.. _LGPL-2.1-or-later: https://spdx.org/licenses/LGPL-2.1-or-later.html

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _lens-driver-requirements:
Lens Driver Requirements

View file

@ -1,168 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
libcamera Architecture
======================
While offering a unified API towards upper layers, and presenting itself as a
single library, libcamera isn't monolithic. It exposes multiple components
through its public API and is built around a set of separate helpers internally.
Hardware abstractions are handled through the use of device-specific components
where required and dynamically loadable plugins are used to separate image
processing algorithms from the core libcamera codebase.
::
--------------------------< libcamera Public API >---------------------------
^ ^
| |
v v
+-------------+ +---------------------------------------------------+
| Camera | | Camera Device |
| Manager | | +-----------------------------------------------+ |
+-------------+ | | Device-Agnostic | |
^ | | | |
| | | +--------------------------+ |
| | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
| | | | { +-----------------+ } |
| | | | } | //// Image //// | { |
| | | | <-> | / Processing // | } |
| | | | } | / Algorithms // | { |
| | | | { +-----------------+ } |
| | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
| | | | ========================== |
| | | | +-----------------+ |
| | | | | // Pipeline /// | |
| | | | <-> | /// Handler /// | |
| | | | | /////////////// | |
| | +--------------------+ +-----------------+ |
| | Device-Specific |
| +---------------------------------------------------+
| ^ ^
| | |
v v v
+--------------------------------------------------------------------+
| Helpers and Support Classes |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
| | Support | | Allocator | | IPC | | Manager | |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| +-------------+ +-------------+ |
| | Pipeline | | ... | |
| | Runner | | | |
| +-------------+ +-------------+ |
+--------------------------------------------------------------------+
/// Device-Specific Components
~~~ Sandboxing
Camera Manager
The Camera Manager enumerates cameras and instantiates Pipeline Handlers to
manage each Camera that libcamera supports. The Camera Manager supports
hotplug detection and notification events when supported by the underlying
kernel devices.
There is only ever one instance of the Camera Manager running per application.
Each application's instance of the Camera Manager ensures that only a single
application can take control of a camera device at once.
Read the `Camera Manager API`_ documentation for more details.
.. _Camera Manager API: https://libcamera.org/api-html/classlibcamera_1_1CameraManager.html
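
As an illustration, hotplug notifications are delivered through signals on the
Camera Manager. A short sketch, assuming the application runs an event loop
elsewhere:

.. code-block:: cpp

   #include <iostream>
   #include <memory>

   #include <libcamera/libcamera.h>

   using namespace libcamera;

   int main()
   {
           CameraManager cm;

           /* Subscribe to hotplug events before starting the manager. */
           cm.cameraAdded.connect([](std::shared_ptr<Camera> cam) {
                   std::cout << "Camera added: " << cam->id() << std::endl;
           });
           cm.cameraRemoved.connect([](std::shared_ptr<Camera> cam) {
                   std::cout << "Camera removed: " << cam->id() << std::endl;
           });

           cm.start();
           /* ... run the application's event loop ... */
           cm.stop();

           return 0;
   }
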
Camera Device
The Camera class represents a single item of camera hardware that is capable
of producing one or more image streams, and provides the API to interact with
the underlying device.
If a system has multiple instances of the same hardware attached, each has its
own instance of the camera class.
The API exposes full control of the device to upper layers of libcamera through
the public API, making it the highest level object libcamera exposes, and the
object that all other API operations interact with from configuration to
capture.
Read the `Camera API`_ documentation for more details.
.. _Camera API: https://libcamera.org/api-html/classlibcamera_1_1Camera.html
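
To make this concrete, the following sketch shows the typical configuration
flow on an already-acquired camera; buffer allocation and the capture loop are
omitted, and the ``configureViewfinder()`` helper is a hypothetical name, not
part of the libcamera API.

.. code-block:: cpp

   #include <errno.h>
   #include <memory>

   #include <libcamera/libcamera.h>

   using namespace libcamera;

   /* Configure an acquired camera for a 1280x720 viewfinder stream. */
   int configureViewfinder(Camera *camera)
   {
           std::unique_ptr<CameraConfiguration> config =
                   camera->generateConfiguration({ StreamRole::Viewfinder });
           if (!config)
                   return -EINVAL;

           /* Adjust the defaults, then let the camera validate them. */
           config->at(0).size = { 1280, 720 };
           if (config->validate() == CameraConfiguration::Invalid)
                   return -EINVAL;

           return camera->configure(config.get());
   }
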
Pipeline Handler
The Pipeline Handler manages the complex pipelines exposed by the kernel
drivers through the Media Controller and V4L2 APIs. It abstracts pipeline
handling to hide device-specific details from the rest of the library, and
implements both pipeline configuration based on stream configuration, and
pipeline runtime execution and scheduling when needed by the device.
The Pipeline Handler lives in the same process as the rest of the library, and
has access to all helpers and kernel camera-related devices.
Hardware abstraction is handled by device-specific Pipeline Handlers, which are
derived from the Pipeline Handler base class, allowing commonality to be shared
among the implementations.
Derived pipeline handlers create Camera device instances based on the devices
they detect and support on the running system, and are responsible for
managing the interactions with a camera device.
More details can be found in the `PipelineHandler API`_ documentation, and the
:doc:`Pipeline Handler Writers Guide <guides/pipeline-handler>`.
.. _PipelineHandler API: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html
Image Processing Algorithms
Together with the hardware image processing and hardware statistics
collection, the Image Processing Algorithms (IPA) implement 3A (Auto-Exposure,
Auto-White Balance and Auto-Focus) and other algorithms. They run on the CPU
and control hardware image processing based on the parameters supplied by
upper layers, closing the control loop of the ISP.
IPAs are loaded as external plugins named IPA Modules. IPA Modules can be part
of the libcamera code base or provided externally by camera vendors as
open-source or closed-source components.
Open source IPA Modules built with libcamera are run in the same process space
as libcamera. External IPA Modules are run in a separate sandboxed process. In
either case, they can only interact with libcamera through the API provided by
the Pipeline Handler. They have a restricted view of the system, with no direct
access to kernel camera devices, no access to networking APIs, and limited
access to file systems. All their accesses to image and metadata are mediated
by dmabuf instances explicitly passed by the Pipeline Handler to the IPA
Module.
IPA Modules are only required for platforms and devices with an ISP controlled
by the host CPU. Camera sensors which have an integrated ISP are not
controlled through the IPA Module.
Helpers and Support Classes
While Pipeline Handlers are device-specific, implementations are expected to
share code due to usage of identical APIs towards the kernel camera drivers
and the Image Processing Algorithms. This includes without limitation handling
of the MC and V4L2 APIs, buffer management through dmabuf, and pipeline
discovery, configuration and scheduling. Such code will be factored out to
helpers when applicable.
Other parts of libcamera will also benefit from factoring code out to
self-contained support classes, even if such code is present only once in the
code base, in order to keep the source code clean and easy to read. This
should be the case for instance for plugin management.
Platform Support
----------------
The library currently supports the following hardware platforms specifically
with dedicated pipeline handlers:
- Arm Mali-C55
- Intel IPU3 (ipu3)
- NXP i.MX8MP (imx8-isi and rkisp1)
- RaspberryPi 3, 4 and zero (rpi/vc4)
- Rockchip RK3399 (rkisp1)
Furthermore, generic platform support is provided for the following:
- USB video device class cameras (uvcvideo)
- iMX7, IPU6, Allwinner Sun6i (simple)
- Virtual media controller driver for test use cases (vimc)

View file

@ -1,33 +0,0 @@
/**
\mainpage libcamera API reference
Welcome to the API reference for <a href="https://libcamera.org/">libcamera</a>,
a complex camera support library for Linux, Android and ChromeOS. These pages
are automatically generated from the libcamera source code and describe the API
in detail - if this is your first interaction with libcamera then you may find
it useful to visit the [documentation](../introduction.html) in
the first instance, which can provide a more generic introduction to the
library's concepts.
\if internal
As a follow-on to the developer's guide, to assist you in adding support for
your platform the [pipeline handler writer's guide](../guides/pipeline-handler.html)
and the [ipa module writer's guide](../guides/ipa.html) should be helpful.
The full libcamera API is documented here. If you wish to see only the public
part of the API you can use [these pages](../api-html/index.html) instead.
\else
As a follow-on to the developer's guide, to assist you in using libcamera within
your project the [application developer's guide](../guides/application-developer.html)
gives an overview on how to achieve that.
Only the public part of the libcamera API is documented here; if you are a
developer seeking to add support for your hardware to the library or make other
improvements, you should switch to the internal API
[reference pages](../internal-api-html/index.html) instead.
\endif
*/

View file

@ -1,25 +0,0 @@
/* SPDX-License-Identifier: CC-BY-SA-4.0 */
digraph board {
rankdir=TB
n00000001 [label="{{} | mali-c55 tpg\n/dev/v4l-subdev0 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
n00000001:port0 -> n00000003:port0 [style=dashed]
n00000003 [label="{{<port0> 0 | <port4> 4} | mali-c55 isp\n/dev/v4l-subdev1 | {<port1> 1 | <port2> 2 | <port3> 3}}", shape=Mrecord, style=filled, fillcolor=green]
n00000003:port1 -> n00000009:port0 [style=bold]
n00000003:port2 -> n00000009:port2 [style=bold]
n00000003:port1 -> n0000000d:port0 [style=bold]
n00000003:port3 -> n0000001c
n00000009 [label="{{<port0> 0 | <port2> 2} | mali-c55 resizer fr\n/dev/v4l-subdev2 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
n00000009:port1 -> n00000010
n0000000d [label="{{<port0> 0} | mali-c55 resizer ds\n/dev/v4l-subdev3 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
n0000000d:port1 -> n00000014
n00000010 [label="mali-c55 fr\n/dev/video0", shape=box, style=filled, fillcolor=yellow]
n00000014 [label="mali-c55 ds\n/dev/video1", shape=box, style=filled, fillcolor=yellow]
n00000018 [label="mali-c55 3a params\n/dev/video2", shape=box, style=filled, fillcolor=yellow]
n00000018 -> n00000003:port4
n0000001c [label="mali-c55 3a stats\n/dev/video3", shape=box, style=filled, fillcolor=yellow]
n00000030 [label="{{<port0> 0} | lte-csi2-rx\n/dev/v4l-subdev4 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
n00000030:port1 -> n00000003:port0
n00000035 [label="{{} | imx415 1-001a\n/dev/v4l-subdev5 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
n00000035:port0 -> n00000030:port0 [style=bold]
}

View file

@ -15,7 +15,6 @@ if doxygen.found() and dot.found()
cdata.set('TOP_SRCDIR', meson.project_source_root())
cdata.set('TOP_BUILDDIR', meson.project_build_root())
cdata.set('OUTPUT_DIR', meson.current_build_dir())
cdata.set('WARN_AS_ERROR', get_option('doc_werror') ? 'YES' : 'NO')
doxygen_predefined = []
foreach key : config_h.keys()
@ -24,124 +23,60 @@ if doxygen.found() and dot.found()
cdata.set('PREDEFINED', ' \\\n\t\t\t '.join(doxygen_predefined))
doxyfile_common = configure_file(input : 'Doxyfile-common.in',
output : 'Doxyfile-common',
doxyfile = configure_file(input : 'Doxyfile.in',
output : 'Doxyfile',
configuration : cdata)
doxygen_public_input = [
libcamera_base_public_headers,
libcamera_base_public_sources,
libcamera_public_headers,
libcamera_public_sources,
]
doxygen_internal_input = [
libcamera_base_private_headers,
libcamera_base_internal_sources,
doxygen_input = [
doxyfile,
libcamera_base_headers,
libcamera_base_sources,
libcamera_internal_headers,
libcamera_internal_sources,
libcamera_ipa_headers,
libcamera_ipa_interfaces,
libcamera_public_headers,
libcamera_sources,
libipa_headers,
libipa_sources,
]
if is_variable('ipu3_ipa_sources')
doxygen_internal_input += [ipu3_ipa_sources]
doxygen_input += [ipu3_ipa_sources]
endif
# We run doxygen twice - the first run excludes internal API objects as it
# is intended to document the public API only. A second run covers all of
# the library's objects for libcamera developers. Common configuration is
# set in an initially generated Doxyfile, which is then included by the two
# final Doxyfiles.
# This is the "public" run of doxygen generating an abridged version of the
# API's documentation.
doxyfile_tmpl = configure_file(input : 'Doxyfile-public.in',
output : 'Doxyfile-public.tmpl',
configuration : cdata)
# The set of public input files stored in the doxygen_public_input array
# needs to be set in Doxyfile-public. We can't pass them through cdata,
# as some of the array members are custom_tgt instances, which
# configuration_data.set() doesn't support. Use a separate script invoked
# through custom_target(), which supports custom_tgt instances as inputs.
doxyfile = custom_target('doxyfile-public',
input : [
doxygen_public_input,
],
output : 'Doxyfile-public',
command : [
'gen-doxyfile.py',
'-o', '@OUTPUT@',
doxyfile_tmpl,
'@INPUT@',
])
custom_target('doxygen-public',
input : [
doxyfile,
doxyfile_common,
],
custom_target('doxygen',
input : doxygen_input,
output : 'api-html',
command : [doxygen, doxyfile],
install : true,
install_dir : doc_install_dir,
install_tag : 'doc')
# This is the internal documentation, which hard-codes a list of directories
# to parse in its doxyfile.
doxyfile = configure_file(input : 'Doxyfile-internal.in',
output : 'Doxyfile-internal',
configuration : cdata)
custom_target('doxygen-internal',
input : [
doxyfile,
doxyfile_common,
doxygen_internal_input,
],
output : 'internal-api-html',
command : [doxygen, doxyfile],
install : true,
install_dir : doc_install_dir,
install_tag : 'doc-internal')
install_dir : doc_install_dir)
endif
#
# Sphinx
#
sphinx = find_program('sphinx-build-3', 'sphinx-build',
required : get_option('documentation'))
sphinx = find_program('sphinx-build-3', required : false)
if not sphinx.found()
sphinx = find_program('sphinx-build', required : get_option('documentation'))
endif
if sphinx.found()
docs_sources = [
'camera-sensor-model.rst',
'code-of-conduct.rst',
'coding-style.rst',
'conf.py',
'contributing.rst',
'design/ae.rst',
'documentation-contents.rst',
'docs.rst',
'environment_variables.rst',
'feature_requirements.rst',
'guides/application-developer.rst',
'guides/introduction.rst',
'guides/ipa.rst',
'guides/pipeline-handler.rst',
'guides/tracing.rst',
'index.rst',
'introduction.rst',
'lens_driver_requirements.rst',
'libcamera_architecture.rst',
'mali-c55.dot',
'python-bindings.rst',
'sensor_driver_requirements.rst',
'software-isp-benchmarking.rst',
'../README.rst',
]
@ -154,8 +89,7 @@ if sphinx.found()
output : 'html',
build_by_default : true,
install : true,
install_dir : doc_install_dir,
install_tag : 'doc')
install_dir : doc_install_dir)
custom_target('documentation-linkcheck',
command : [sphinx, '-W', '-b', 'linkcheck', meson.current_source_dir(), '@OUTPUT@'],

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _python-bindings:
Python Bindings for libcamera
@ -19,13 +17,13 @@ chapter lists the differences.
Mostly these differences fall under two categories:
1. Differences caused by the inherent differences between C++ and Python.
These differences are usually caused by the use of threads or differences in
C++ vs Python memory management.
These differences are usually caused by the use of threads or differences in
C++ vs Python memory management.
2. Differences caused by the code being work-in-progress. It's not always
trivial to create a binding in a satisfying way, and the current bindings
contain simplified versions of the C++ API just to get forward. These
differences are expected to eventually go away.
trivial to create a binding in a satisfying way, and the current bindings
contain simplified versions of the C++ API just to get forward. These
differences are expected to eventually go away.
Coding Style
------------

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _sensor-driver-requirements:
Sensor Driver Requirements

File diff suppressed because it is too large


File diff suppressed because it is too large


View file

@ -1,79 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _software-isp-benchmarking:
Software ISP benchmarking
=========================
The Software ISP is particularly sensitive to performance regressions,
therefore it is a good idea to always benchmark the Software ISP before and
after making changes to it, to ensure that there are no performance
regressions.
DebayerCpu class builtin benchmark
----------------------------------
The DebayerCpu class has a builtin benchmark. This benchmark measures the time
spent on processing (collecting statistics and debayering) only; it does not
measure the time spent on capturing or outputting the frames.
The builtin benchmark always runs, so it can be used by simply running "cam"
or "qcam" with a pipeline using the Software ISP.
When it runs it skips measuring the first 30 frames to allow the caches and
the CPU temperature (turbo-ing) to warm up, and then it measures 30 frames and
shows the total and per-frame processing time using an info-level log message:
.. code-block:: text
INFO Debayer debayer_cpu.cpp:907 Processed 30 frames in 244317us, 8143 us/frame
To get stable measurements it is advised to disable any other processes which
may cause significant CPU usage (e.g. disable wifi, bluetooth and browsers).
When possible it is also advisable to disable CPU turbo-ing and
frequency-scaling.
For example when benchmarking on a Lenovo ThinkPad X1 Yoga Gen 8, with the
charger plugged in, the CPU can be fixed to run at 2 GHz using:
.. code-block:: shell
sudo x86_energy_perf_policy --turbo-enable 0
sudo cpupower frequency-set -d 2GHz -u 2GHz
With these settings the builtin benchmark reports a processing time of
~7.8 ms/frame on this laptop for FHD SGRBG10 (unpacked) Bayer data.
Measuring power consumption
---------------------------
Since the Software ISP is often used on mobile devices, it is also important to
measure power consumption and ensure that it does not regress.
For example to measure power consumption on a Lenovo ThinkPad X1 Yoga Gen 8 it
needs to be running on battery and it should be configured with its
platform-profile (/sys/firmware/acpi/platform_profile) set to balanced and with
its default turbo and frequency-scaling behavior to match real world usage.
Then start qcam to capture an FHD picture at 30 fps and position the qcam
window so that it is fully visible. After this, run the following command to
monitor the power consumption:
.. code-block:: shell
watch -n 10 cat /sys/class/power_supply/BAT0/power_now /sys/class/hwmon/hwmon6/fan?_input
Note that this not only measures the power consumption in µW, it also monitors
the speed of this laptop's two fans. This is important because, depending on
the ambient temperature, the fans may spin up while testing, causing an
additional power consumption of approx. 0.5 W and skewing the measurement.
After starting qcam and the watch command, let the laptop sit unused for
2 minutes to let the readings stabilize. Then check that the fans have not
turned on, manually take a couple of consecutive power readings, and average
them.
On the example Lenovo ThinkPad X1 Yoga Gen 8 laptop this results in a measured
power consumption of approx. 13 W while running qcam versus approx. 4-5 W while
sitting idle with its OLED panel on.

View file

@ -33,6 +33,11 @@ SPDX-License-Identifier: CC-BY-SA-4.0
{% endif %}
{# RTD hosts this file, so just load on non RTD builds #}
{% if not READTHEDOCS %}
<link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
{% endif %}
{% for cssfile in css_files %}
<link rel="stylesheet" href="{{ pathto(cssfile, 1) }}" type="text/css" />
{% endfor %}

View file

@ -283,13 +283,9 @@ div#signature {
font-size: 12px;
}
#licensing div.toctree-wrapper {
#libcamera div.toctree-wrapper {
height: 0px;
margin: 0px;
padding: 0px;
visibility: hidden;
}
.documentation-nav {
display: none;
}

View file

@ -1,44 +0,0 @@
/**
* \page thread-safety Reentrancy and Thread-Safety
*
* Throughout the documentation, several terms are used to define how classes and
* their member functions can be used from multiple threads.
*
* - A **reentrant** function may be called simultaneously from multiple
* threads if and only if each invocation uses a different instance of the
* class. This is the default for all member functions not explicitly marked
* otherwise.
*
* - \anchor thread-safe A **thread-safe** function may be called
* simultaneously from multiple threads on the same instance of a class. A
* thread-safe function is thus reentrant. Thread-safe functions may also be
* called simultaneously with any other reentrant function of the same class
* on the same instance.
*
* \internal
* - \anchor thread-bound A **thread-bound** function may be called only from
* the thread that the class instances lives in (see section \ref
* thread-objects). For instances of classes that do not derive from the
* Object class, this is the thread in which the instance was created. A
* thread-bound function is not thread-safe, and may or may not be reentrant.
* \endinternal
*
* Neither reentrancy nor thread-safety, in this context, mean that a function
* may be called simultaneously from the same thread, for instance from a
* callback invoked by the function. This may deadlock and isn't allowed unless
* separately documented.
*
* \if internal
* A class is defined as reentrant, thread-safe or thread-bound if all its
* member functions are reentrant, thread-safe or thread-bound respectively.
* \else
* A class is defined as reentrant or thread-safe if all its member functions
* are reentrant or thread-safe respectively.
* \endif
* Some member functions may additionally be documented as having additional
* thread-related attributes.
*
* Most classes are reentrant but not thread-safe, as making them fully
* thread-safe would incur locking costs considered prohibitive for the
* expected use cases.
*/
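
To illustrate the distinction, a hedged sketch with hypothetical classes that
are not part of libcamera: each ``Counter`` below is reentrant, as concurrent
calls are only safe on distinct instances, while ``SharedCounter`` is
thread-safe, as its internal mutex makes concurrent calls on the same instance
safe.

.. code-block:: cpp

   #include <mutex>

   /* Reentrant: usable from multiple threads on different instances only. */
   class Counter
   {
   public:
           void increment() { value_++; }
           int value() const { return value_; }

   private:
           int value_ = 0;
   };

   /* Thread-safe: usable from multiple threads on the same instance. */
   class SharedCounter
   {
   public:
           void increment()
           {
                   std::lock_guard<std::mutex> lock(mutex_);
                   value_++;
           }

           int value() const
           {
                   std::lock_guard<std::mutex> lock(mutex_);
                   return value_;
           }

   private:
           mutable std::mutex mutex_;
           int value_ = 0;
   };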

View file

@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. section-begin-libcamera
===========
libcamera
===========
@ -20,6 +22,7 @@ open-source-friendly while still protecting vendor core IP. libcamera was born
out of that collaboration and will offer modern camera support to Linux-based
systems, including traditional Linux distributions, ChromeOS and Android.
.. section-end-libcamera
.. section-begin-getting-started
Getting Started
@ -27,7 +30,7 @@ Getting Started
To fetch the sources, build and install:
.. code::
::
git clone https://git.libcamera.org/libcamera/libcamera.git
cd libcamera
@ -44,7 +47,15 @@ A C++ toolchain: [required]
Either {g++, clang}
Meson Build system: [required]
meson (>= 0.63) ninja-build pkg-config
meson (>= 0.56) ninja-build pkg-config
If your distribution doesn't provide a recent enough version of meson,
you can install or upgrade it using pip3.
.. code::
pip3 install --user meson
pip3 install --user --upgrade meson
for the libcamera core: [required]
libyaml-dev python3-yaml python3-ply python3-jinja2
@ -72,9 +83,6 @@ for documentation: [optional]
for gstreamer: [optional]
libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev
for Python bindings: [optional]
libpython3-dev pybind11-dev
for cam: [optional]
libevent-dev is required to support cam, however the following
optional dependencies bring more functionality to the cam test
@ -83,10 +91,9 @@ for cam: [optional]
- libdrm-dev: Enables the KMS sink
- libjpeg-dev: Enables MJPEG on the SDL sink
- libsdl2-dev: Enables the SDL sink
- libtiff-dev: Enables writing DNG
for qcam: [optional]
libtiff-dev qt6-base-dev
qtbase5-dev libqt5core5a libqt5gui5 libqt5widgets5 qttools5-dev-tools libtiff-dev
for tracing with lttng: [optional]
liblttng-ust-dev python3-jinja2 lttng-tools
@ -95,7 +102,7 @@ for android: [optional]
libexif-dev libjpeg-dev
for lc-compliance: [optional]
libevent-dev libgtest-dev
libevent-dev
for abi-compat.sh: [optional]
abi-compliance-checker
@ -118,13 +125,10 @@ setting the ``LIBCAMERA_LOG_LEVELS`` environment variable:
Using GStreamer plugin
~~~~~~~~~~~~~~~~~~~~~~
To use the GStreamer plugin from the source tree, use the meson ``devenv``
command. This will create a new shell instance with the ``GST_PLUGIN_PATH``
environment set accordingly.
To use GStreamer plugin from source tree, set the following environment so that
GStreamer can find it. This isn't necessary when libcamera is installed.
.. code::
meson devenv -C build
export GST_PLUGIN_PATH=$(pwd)/build/src/gstreamer
The debugging tool ``gst-launch-1.0`` can be used to construct a pipeline and
test it. The following pipeline will stream from the camera named "Camera 1"
@ -132,7 +136,7 @@ onto the OpenGL accelerated display element on your system.
.. code::
gst-launch-1.0 libcamerasrc camera-name="Camera 1" ! queue ! glimagesink
gst-launch-1.0 libcamerasrc camera-name="Camera 1" ! glimagesink
To show the first camera found you can omit the camera-name property, or you
can list the cameras and their capabilities using:
@ -147,7 +151,7 @@ if desired with a pipeline such as:
.. code::
gst-launch-1.0 libcamerasrc ! 'video/x-raw,width=1280,height=720' ! \
queue ! glimagesink
glimagesink
The libcamerasrc element has two log categories, named libcamera-provider (for
the video device provider) and libcamerasrc (for the operation of the camera).
@ -163,7 +167,7 @@ the following example could be used as a starting point:
gst-launch-1.0 libcamerasrc ! \
video/x-raw,colorimetry=bt709,format=NV12,width=1280,height=720,framerate=30/1 ! \
queue ! jpegenc ! multipartmux ! \
jpegenc ! multipartmux ! \
tcpserversink host=0.0.0.0 port=5000
Which can be received on another device over the network with:
@ -173,22 +177,6 @@ Which can be received on another device over the network with:
gst-launch-1.0 tcpclientsrc host=$DEVICE_IP port=5000 ! \
multipartdemux ! jpegdec ! autovideosink
The GStreamer element also supports multiple streams. This is achieved by
requesting additional source pads. Downstream caps filters can be used
to choose specific parameters like resolution and pixel format. The pad
property ``stream-role`` can be used to select a role.
The following example displays a 640x480 view finder while streaming JPEG
encoded 800x600 video. You can use the receiver pipeline above to view the
remote stream from another device.
.. code::
gst-launch-1.0 libcamerasrc name=cs src::stream-role=view-finder src_0::stream-role=video-recording \
cs.src ! queue ! video/x-raw,width=640,height=480 ! videoconvert ! autovideosink \
cs.src_0 ! queue ! video/x-raw,width=800,height=600 ! videoconvert ! \
jpegenc ! multipartmux ! tcpserversink host=0.0.0.0 port=5000
.. section-end-getting-started
Troubleshooting
@ -206,8 +194,8 @@ the build.ninja module. This is a snippet of the error message.
This can be solved in two ways:
1. Don't install meson again if it is already installed system-wide.
1) Don't install meson again if it is already installed system-wide.
2. If a version of meson which is different from the system-wide version is
already installed, uninstall that meson using pip3, and install again without
the --user argument.
2) If a version of meson which is different from the system-wide version is
already installed, uninstall that meson using pip3, and install again without
the --user argument.

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Ideas on Board Oy
*
* Call stack backtraces
* backtrace.h - Call stack backtraces
*/
#pragma once

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Method bind and invocation
* bound_method.h - Method bind and invocation
*/
#pragma once
@ -98,14 +98,20 @@ public:
using PackType = BoundMethodPack<R, Args...>;
private:
template<std::size_t... I>
void invokePack(BoundMethodPackBase *pack, std::index_sequence<I...>)
template<std::size_t... I, typename T = R>
std::enable_if_t<!std::is_void<T>::value, void>
invokePack(BoundMethodPackBase *pack, std::index_sequence<I...>)
{
[[maybe_unused]] auto *args = static_cast<PackType *>(pack);
if constexpr (!std::is_void_v<R>)
PackType *args = static_cast<PackType *>(pack);
args->ret_ = invoke(std::get<I>(args->args_)...);
else
}
template<std::size_t... I, typename T = R>
std::enable_if_t<std::is_void<T>::value, void>
invokePack(BoundMethodPackBase *pack, std::index_sequence<I...>)
{
/* args is effectively unused when the sequence I is empty. */
PackType *args [[gnu::unused]] = static_cast<PackType *>(pack);
invoke(std::get<I>(args->args_)...);
}

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
* Utilities and helpers for classes
* class.h - Utilities and helpers for classes
*/
#pragma once

View file

@ -0,0 +1,14 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021, Google Inc.
*
* compiler.h - Compiler support
*/
#pragma once
#if __cplusplus >= 201703L
#define __nodiscard [[nodiscard]]
#else
#define __nodiscard
#endif

View file

@ -2,11 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Event dispatcher
* event_dispatcher.h - Event dispatcher
*/
#pragma once
#include <vector>
#include <libcamera/base/private.h>
namespace libcamera {

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Poll-based event dispatcher
* event_dispatcher_poll.h - Poll-based event dispatcher
*/
#pragma once

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* File descriptor event notifier
* event_notifier.h - File descriptor event notifier
*/
#pragma once

View file

@ -2,16 +2,16 @@
/*
* Copyright (C) 2020, Google Inc.
*
* File I/O operations
* file.h - File I/O operations
*/
#pragma once
#include <map>
#include <stdint.h>
#include <string>
#include <sys/types.h>
#include <map>
#include <string>
#include <libcamera/base/private.h>
#include <libcamera/base/class.h>

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
* Type-safe enum-based bitfields
* flags.h - Type-safe enum-based bitfields
*/
#pragma once

View file

@ -2,14 +2,13 @@
/*
* Copyright (C) 2018, Google Inc.
*
* Logging infrastructure
* log.h - Logging infrastructure
*/
#pragma once
#include <atomic>
#include <chrono>
#include <sstream>
#include <string_view>
#include <libcamera/base/private.h>
@ -30,29 +29,25 @@ enum LogSeverity {
class LogCategory
{
public:
static LogCategory *create(std::string_view name);
static LogCategory *create(const char *name);
const std::string &name() const { return name_; }
LogSeverity severity() const { return severity_.load(std::memory_order_relaxed); }
void setSeverity(LogSeverity severity) { severity_.store(severity, std::memory_order_relaxed); }
LogSeverity severity() const { return severity_; }
void setSeverity(LogSeverity severity);
static const LogCategory &defaultCategory();
private:
friend class Logger;
explicit LogCategory(std::string_view name);
explicit LogCategory(const char *name);
const std::string name_;
std::atomic<LogSeverity> severity_;
static_assert(decltype(severity_)::is_always_lock_free);
LogSeverity severity_;
};
#define LOG_DECLARE_CATEGORY(name) \
extern const LogCategory &_LOG_CATEGORY(name)();
#define LOG_DEFINE_CATEGORY(name) \
LOG_DECLARE_CATEGORY(name) \
const LogCategory &_LOG_CATEGORY(name)() \
{ \
/* The instance will be deleted by the Logger destructor. */ \
@ -65,7 +60,9 @@ class LogMessage
public:
LogMessage(const char *fileName, unsigned int line,
const LogCategory &category, LogSeverity severity,
std::string prefix = {});
const std::string &prefix = std::string());
LogMessage(LogMessage &&);
~LogMessage();
std::ostream &stream() { return msgStream_; }
@ -78,7 +75,9 @@ public:
const std::string msg() const { return msgStream_.str(); }
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(LogMessage)
LIBCAMERA_DISABLE_COPY(LogMessage)
void init(const char *fileName, unsigned int line);
std::ostringstream msgStream_;
const LogCategory &category_;

View file

@ -1,32 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Ideas on Board Oy
*
* Anonymous file creation
*/
#pragma once
#include <libcamera/base/flags.h>
#include <libcamera/base/unique_fd.h>
namespace libcamera {
class MemFd
{
public:
enum class Seal {
None = 0,
Shrink = (1 << 0),
Grow = (1 << 1),
};
using Seals = Flags<Seal>;
static UniqueFD create(const char *name, std::size_t size,
Seals seals = Seal::None);
};
LIBCAMERA_FLAGS_ENABLE_OPERATORS(MemFd::Seal)
} /* namespace libcamera */

View file

@ -5,6 +5,7 @@ libcamera_base_include_dir = libcamera_include_dir / 'base'
libcamera_base_public_headers = files([
'bound_method.h',
'class.h',
'compiler.h',
'flags.h',
'object.h',
'shared_fd.h',
@ -20,7 +21,6 @@ libcamera_base_private_headers = files([
'event_notifier.h',
'file.h',
'log.h',
'memfd.h',
'message.h',
'mutex.h',
'private.h',

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Message queue support
* message.h - Message queue support
*/
#pragma once

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
* Mutex classes with clang thread safety annotation
* mutex.h - Mutex classes with clang thread safety annotation
*/
#pragma once
@ -23,6 +23,10 @@ namespace libcamera {
class LIBCAMERA_TSA_CAPABILITY("mutex") Mutex final
{
public:
constexpr Mutex()
{
}
void lock() LIBCAMERA_TSA_ACQUIRE()
{
mutex_.lock();
@ -80,6 +84,10 @@ private:
class ConditionVariable final
{
public:
ConditionVariable()
{
}
void notify_one() noexcept
{
cv_.notify_one();

View file

@ -2,18 +2,16 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Base object
* object.h - Base object
*/
#pragma once
#include <list>
#include <memory>
#include <utility>
#include <vector>
#include <libcamera/base/bound_method.h>
#include <libcamera/base/class.h>
namespace libcamera {
@ -40,7 +38,7 @@ public:
{
T *obj = static_cast<T *>(this);
auto *method = new BoundMethodMember<T, R, FuncArgs...>(obj, this, func, type);
return method->activate(std::forward<Args>(args)..., true);
return method->activate(args..., true);
}
Thread *thread() const { return thread_; }
@ -51,11 +49,7 @@ public:
protected:
virtual void message(Message *msg);
bool assertThreadBound(const char *message);
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(Object)
friend class SignalBase;
friend class Thread;

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
* Private Header Validation
* private.h - Private Header Validation
*
* A selection of internal libcamera headers are installed as part
* of the libcamera package to allow sharing of a select subset of

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* General-purpose counting semaphore
* semaphore.h - General-purpose counting semaphore
*/
#pragma once

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* File descriptor wrapper with shared ownership
* shared_fd.h - File descriptor wrapper with shared ownership
*/
#pragma once

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Signal & slot implementation
* signal.h - Signal & slot implementation
*/
#pragma once
@ -10,13 +10,13 @@
#include <functional>
#include <list>
#include <type_traits>
#include <vector>
#include <libcamera/base/bound_method.h>
#include <libcamera/base/object.h>
namespace libcamera {
class Object;
class SignalBase
{
public:
@ -63,8 +63,11 @@ public:
#ifndef __DOXYGEN__
template<typename T, typename Func,
std::enable_if_t<std::is_base_of<Object, T>::value &&
std::is_invocable_v<Func, Args...>> * = nullptr>
std::enable_if_t<std::is_base_of<Object, T>::value
#if __cplusplus >= 201703L
&& std::is_invocable_v<Func, Args...>
#endif
> * = nullptr>
void connect(T *obj, Func func, ConnectionType type = ConnectionTypeAuto)
{
Object *object = static_cast<Object *>(obj);
@ -72,8 +75,11 @@ public:
}
template<typename T, typename Func,
std::enable_if_t<!std::is_base_of<Object, T>::value &&
std::is_invocable_v<Func, Args...>> * = nullptr>
std::enable_if_t<!std::is_base_of<Object, T>::value
#if __cplusplus >= 201703L
&& std::is_invocable_v<Func, Args...>
#endif
> * = nullptr>
#else
template<typename T, typename Func>
#endif

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
* C++20 std::span<> implementation for C++11
* span.h - C++20 std::span<> implementation for C++11
*/
#pragma once
@ -10,6 +10,7 @@
#include <array>
#include <iterator>
#include <limits>
#include <stddef.h>
#include <type_traits>
namespace libcamera {
@ -346,7 +347,13 @@ public:
}
constexpr Span(const Span &other) noexcept = default;
constexpr Span &operator=(const Span &other) noexcept = default;
constexpr Span &operator=(const Span &other) noexcept
{
data_ = other.data_;
size_ = other.size_;
return *this;
}
constexpr iterator begin() const { return data(); }
constexpr const_iterator cbegin() const { return begin(); }

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Thread support
* thread.h - Thread support
*/
#pragma once
@ -13,10 +13,8 @@
#include <libcamera/base/private.h>
#include <libcamera/base/class.h>
#include <libcamera/base/message.h>
#include <libcamera/base/signal.h>
#include <libcamera/base/span.h>
#include <libcamera/base/utils.h>
namespace libcamera {
@ -37,8 +35,6 @@ public:
void exit(int code = 0);
bool wait(utils::duration duration = utils::duration::max());
int setThreadAffinity(const Span<const unsigned int> &cpus);
bool isRunning();
Signal<> finished;
@ -48,21 +44,16 @@ public:
EventDispatcher *eventDispatcher();
void dispatchMessages(Message::Type type = Message::Type::None,
Object *receiver = nullptr);
void dispatchMessages(Message::Type type = Message::Type::None);
protected:
int exec();
virtual void run();
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(Thread)
void startThread();
void finishThread();
void setThreadAffinityInternal();
void postMessage(std::unique_ptr<Message> msg, Object *receiver);
void removeMessages(Object *receiver);

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
* Macro of Clang thread safety analysis
* thread_annotation.h - Macro of Clang thread safety analysis
*/
#pragma once

View file

@ -2,12 +2,13 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Generic timer
* timer.h - Generic timer
*/
#pragma once
#include <chrono>
#include <stdint.h>
#include <libcamera/base/private.h>

View file

@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
* File descriptor wrapper that owns a file descriptor.
* unique_fd.h - File descriptor wrapper that owns a file descriptor.
*/
#pragma once
@ -10,6 +10,7 @@
#include <utility>
#include <libcamera/base/class.h>
#include <libcamera/base/compiler.h>
namespace libcamera {
@ -42,7 +43,7 @@ public:
return *this;
}
[[nodiscard]] int release()
__nodiscard int release()
{
int fd = fd_;
fd_ = -1;

View file

@ -2,20 +2,19 @@
/*
* Copyright (C) 2018, Google Inc.
*
* Miscellaneous utility functions
* utils.h - Miscellaneous utility functions
*/
#pragma once
#include <algorithm>
#include <chrono>
#include <functional>
#include <iterator>
#include <memory>
#include <ostream>
#include <sstream>
#include <stdint.h>
#include <string.h>
#include <string>
#include <string.h>
#include <sys/time.h>
#include <type_traits>
#include <utility>
@ -91,30 +90,6 @@ template<typename T,
_hex hex(T value, unsigned int width = 0);
#ifndef __DOXYGEN__
template<>
inline _hex hex<int8_t>(int8_t value, unsigned int width)
{
return { static_cast<uint64_t>(value), width ? width : 2 };
}
template<>
inline _hex hex<uint8_t>(uint8_t value, unsigned int width)
{
return { static_cast<uint64_t>(value), width ? width : 2 };
}
template<>
inline _hex hex<int16_t>(int16_t value, unsigned int width)
{
return { static_cast<uint64_t>(value), width ? width : 4 };
}
template<>
inline _hex hex<uint16_t>(uint16_t value, unsigned int width)
{
return { static_cast<uint64_t>(value), width ? width : 4 };
}
template<>
inline _hex hex<int32_t>(int32_t value, unsigned int width)
{
@ -205,16 +180,7 @@ public:
iterator &operator++();
std::string operator*() const;
bool operator==(const iterator &other) const
{
return pos_ == other.pos_;
}
bool operator!=(const iterator &other) const
{
return !(*this == other);
}
bool operator!=(const iterator &other) const;
private:
const StringSplitter *ss_;
@ -222,15 +188,8 @@ public:
std::string::size_type next_;
};
iterator begin() const
{
return { this, 0 };
}
iterator end() const
{
return { this, std::string::npos };
}
iterator begin() const;
iterator end() const;
private:
std::string str_;
@ -410,24 +369,6 @@ decltype(auto) abs_diff(const T &a, const T &b)
double strtod(const char *__restrict nptr, char **__restrict endptr);
template<class Enum>
constexpr std::underlying_type_t<Enum> to_underlying(Enum e) noexcept
{
return static_cast<std::underlying_type_t<Enum>>(e);
}
class ScopeExitActions
{
public:
~ScopeExitActions();
void operator+=(std::function<void()> &&action);
void release();
private:
std::vector<std::function<void()>> actions_;
};
} /* namespace utils */
#ifndef __DOXYGEN__

View file

@ -2,14 +2,13 @@
/*
* Copyright (C) 2018, Google Inc.
*
* Camera object interface
* camera.h - Camera object interface
*/
#pragma once
#include <initializer_list>
#include <memory>
#include <optional>
#include <set>
#include <stdint.h>
#include <string>
@ -20,10 +19,9 @@
#include <libcamera/base/signal.h>
#include <libcamera/controls.h>
#include <libcamera/geometry.h>
#include <libcamera/orientation.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
#include <libcamera/transform.h>
namespace libcamera {
@ -32,30 +30,6 @@ class FrameBufferAllocator;
class PipelineHandler;
class Request;
class SensorConfiguration
{
public:
unsigned int bitDepth = 0;
Rectangle analogCrop;
struct {
unsigned int binX = 1;
unsigned int binY = 1;
} binning;
struct {
unsigned int xOddInc = 1;
unsigned int xEvenInc = 1;
unsigned int yOddInc = 1;
unsigned int yEvenInc = 1;
} skipping;
Size outputSize;
bool isValid() const;
};
class CameraConfiguration
{
public:
@ -92,8 +66,7 @@ public:
bool empty() const;
std::size_t size() const;
std::optional<SensorConfiguration> sensorConfig;
Orientation orientation;
Transform transform;
protected:
CameraConfiguration();
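On master a CameraConfiguration may carry an optional SensorConfiguration, and rotation is expressed through Orientation instead of a raw Transform. A hedged sketch of filling both; the sizes and binning factors are purely illustrative:

#include <memory>

#include <libcamera/camera.h>
#include <libcamera/orientation.h>

using namespace libcamera;

void configureCamera(Camera *camera)
{
        std::unique_ptr<CameraConfiguration> config =
                camera->generateConfiguration({ StreamRole::Viewfinder });

        SensorConfiguration sensorConfig;
        sensorConfig.outputSize = { 1640, 1232 };
        sensorConfig.binning.binX = 2;
        sensorConfig.binning.binY = 2;

        config->sensorConfig = sensorConfig;
        config->orientation = Orientation::Rotate0;

        if (config->validate() == CameraConfiguration::Invalid)
                return;

        camera->configure(config.get());
}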
View file
@ -2,14 +2,13 @@
/*
* Copyright (C) 2018, Google Inc.
*
* Camera management
* camera_manager.h - Camera management
*/
#pragma once
#include <memory>
#include <string>
#include <string_view>
#include <sys/types.h>
#include <vector>
@ -32,7 +31,7 @@ public:
void stop();
std::vector<std::shared_ptr<Camera>> cameras() const;
std::shared_ptr<Camera> get(std::string_view id);
std::shared_ptr<Camera> get(const std::string &id);
static const std::string &version() { return version_; }
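With get() taking std::string_view, lookups no longer force a std::string allocation at the call site. A short usage sketch; the camera id is hypothetical:

#include <iostream>
#include <memory>

#include <libcamera/camera.h>
#include <libcamera/camera_manager.h>

int main()
{
        libcamera::CameraManager manager;
        if (manager.start())
                return 1;

        for (const auto &camera : manager.cameras())
                std::cout << camera->id() << std::endl;

        /* The literal binds to a string_view without constructing a string. */
        std::shared_ptr<libcamera::Camera> camera =
                manager.get("/base/soc/i2c0mux/i2c@1/imx219@10");

        manager.stop();
        return 0;
}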
View file
@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Raspberry Pi Ltd
*
* color space definitions
* color_space.h - color space definitions
*/
#pragma once
View file
@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* {{mode|capitalize}} ID list
* control_ids.h - Control ID list
*
* This file is auto-generated. Do not edit.
*/
@ -10,52 +10,28 @@
#pragma once
#include <array>
#include <map>
#include <stdint.h>
#include <string>
#include <libcamera/controls.h>
namespace libcamera {
namespace {{mode}} {
namespace controls {
extern const ControlIdMap {{mode}};
{%- for vendor, ctrls in controls -%}
{% if vendor != 'libcamera' %}
namespace {{vendor}} {
#define LIBCAMERA_HAS_{{vendor|upper}}_VENDOR_{{mode|upper}}
{%- endif %}
{% if ctrls %}
enum {
{%- for ctrl in ctrls %}
{{ctrl.name|snake_case|upper}} = {{ctrl.id}},
{%- endfor %}
${ids}
};
{% endif %}
{% for ctrl in ctrls -%}
{% if ctrl.is_enum -%}
enum {{ctrl.name}}Enum {
{%- for enum in ctrl.enum_values %}
{{enum.name}} = {{enum.value}},
{%- endfor %}
};
extern const std::array<const ControlValue, {{ctrl.enum_values_count}}> {{ctrl.name}}Values;
extern const std::map<std::string, {{ctrl.type}}> {{ctrl.name}}NameValueMap;
{% endif -%}
extern const Control<{{ctrl.type}}> {{ctrl.name}};
{% endfor -%}
${controls}
{% if vendor != 'libcamera' %}
} /* namespace {{vendor}} */
{% endif -%}
extern const ControlIdMap controls;
{% endfor %}
} /* namespace {{mode}} */
namespace draft {
${draft_controls}
} /* namespace draft */
} /* namespace controls */
} /* namespace libcamera */
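The switch from string substitution (${ids}, ${controls}) to Jinja2 lets a single template generate both the control and property headers, selected through {{mode}}. Hand-expanding the master template for one enum control yields roughly the following; the numeric id is illustrative, not generator output:

namespace libcamera {

namespace controls {

extern const ControlIdMap controls;

enum {
        AE_METERING_MODE = 3,   /* illustrative id */
};

enum AeMeteringModeEnum {
        MeteringCentreWeighted = 0,
        MeteringSpot = 1,
        MeteringMatrix = 2,
        MeteringCustom = 3,
};

extern const std::array<const ControlValue, 4> AeMeteringModeValues;
extern const std::map<std::string, int32_t> AeMeteringModeNameValueMap;
extern const Control<int32_t> AeMeteringMode;

} /* namespace controls */

} /* namespace libcamera */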
View file
@ -2,13 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Control handling
* controls.h - Control handling
*/
#pragma once
#include <assert.h>
#include <map>
#include <optional>
#include <set>
#include <stdint.h>
@ -17,7 +16,6 @@
#include <vector>
#include <libcamera/base/class.h>
#include <libcamera/base/flags.h>
#include <libcamera/base/span.h>
#include <libcamera/geometry.h>
@ -30,102 +28,67 @@ enum ControlType {
ControlTypeNone,
ControlTypeBool,
ControlTypeByte,
ControlTypeUnsigned16,
ControlTypeUnsigned32,
ControlTypeInteger32,
ControlTypeInteger64,
ControlTypeFloat,
ControlTypeString,
ControlTypeRectangle,
ControlTypeSize,
ControlTypePoint,
};
namespace details {
template<typename T, typename = std::void_t<>>
template<typename T>
struct control_type {
};
template<>
struct control_type<void> {
static constexpr ControlType value = ControlTypeNone;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<bool> {
static constexpr ControlType value = ControlTypeBool;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<uint8_t> {
static constexpr ControlType value = ControlTypeByte;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<uint16_t> {
static constexpr ControlType value = ControlTypeUnsigned16;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<uint32_t> {
static constexpr ControlType value = ControlTypeUnsigned32;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<int32_t> {
static constexpr ControlType value = ControlTypeInteger32;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<int64_t> {
static constexpr ControlType value = ControlTypeInteger64;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<float> {
static constexpr ControlType value = ControlTypeFloat;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<std::string> {
static constexpr ControlType value = ControlTypeString;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<Rectangle> {
static constexpr ControlType value = ControlTypeRectangle;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<Size> {
static constexpr ControlType value = ControlTypeSize;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<Point> {
static constexpr ControlType value = ControlTypePoint;
static constexpr std::size_t size = 0;
};
template<typename T, std::size_t N>
struct control_type<Span<T, N>, std::enable_if_t<control_type<std::remove_cv_t<T>>::size == 0>> : public control_type<std::remove_cv_t<T>> {
static constexpr std::size_t size = N;
};
template<typename T>
struct control_type<T, std::enable_if_t<std::is_enum_v<T> && sizeof(T) == sizeof(int32_t)>> : public control_type<int32_t> {
struct control_type<Span<T, N>> : public control_type<std::remove_cv_t<T>> {
};
} /* namespace details */
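The void_t-based primary template lets master constrain the Span and enum specializations, and the trait now reports an array size alongside the wire type. A few illustrative compile-time checks against the master header:

#include <libcamera/controls.h>

using namespace libcamera;

static_assert(details::control_type<bool>::value == ControlTypeBool);
static_assert(details::control_type<int32_t>::value == ControlTypeInteger32);

/* A fixed-extent Span keeps the element's type and records its size. */
static_assert(details::control_type<Span<const float, 2>>::value == ControlTypeFloat);
static_assert(details::control_type<Span<const float, 2>>::size == 2);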
@ -250,44 +213,23 @@ private:
class ControlId
{
public:
enum class Direction {
In = (1 << 0),
Out = (1 << 1),
};
using DirectionFlags = Flags<Direction>;
ControlId(unsigned int id, const std::string &name, const std::string &vendor,
ControlType type, DirectionFlags direction,
std::size_t size = 0,
const std::map<std::string, int32_t> &enumStrMap = {});
ControlId(unsigned int id, const std::string &name, ControlType type)
: id_(id), name_(name), type_(type)
{
}
unsigned int id() const { return id_; }
const std::string &name() const { return name_; }
const std::string &vendor() const { return vendor_; }
ControlType type() const { return type_; }
DirectionFlags direction() const { return direction_; }
bool isInput() const { return !!(direction_ & Direction::In); }
bool isOutput() const { return !!(direction_ & Direction::Out); }
bool isArray() const { return size_ > 0; }
std::size_t size() const { return size_; }
const std::map<int32_t, std::string> &enumerators() const { return reverseMap_; }
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(ControlId)
unsigned int id_;
std::string name_;
std::string vendor_;
ControlType type_;
DirectionFlags direction_;
std::size_t size_;
std::map<std::string, int32_t> enumStrMap_;
std::map<int32_t, std::string> reverseMap_;
};
LIBCAMERA_FLAGS_ENABLE_OPERATORS(ControlId::Direction)
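Direction flags let a control advertise whether it can be set on a request (In), reported back in metadata (Out), or both. A hedged sketch of querying them; pass any ControlId, for instance controls::ExposureTime:

#include <iostream>

#include <libcamera/controls.h>

using namespace libcamera;

void describe(const ControlId &id)
{
        std::cout << id.name()
                  << (id.isInput() ? " [settable]" : "")
                  << (id.isOutput() ? " [reported]" : "")
                  << std::endl;
}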
static inline bool operator==(unsigned int lhs, const ControlId &rhs)
{
return lhs == rhs.id();
@ -314,11 +256,8 @@ class Control : public ControlId
public:
using type = T;
Control(unsigned int id, const char *name, const char *vendor,
ControlId::DirectionFlags direction,
const std::map<std::string, int32_t> &enumStrMap = {})
: ControlId(id, name, vendor, details::control_type<std::remove_cv_t<T>>::value,
direction, details::control_type<std::remove_cv_t<T>>::size, enumStrMap)
Control(unsigned int id, const char *name)
: ControlId(id, name, details::control_type<std::remove_cv_t<T>>::value)
{
}
@ -413,11 +352,6 @@ private:
using ControlListMap = std::unordered_map<unsigned int, ControlValue>;
public:
enum class MergePolicy {
KeepExisting = 0,
OverwriteExisting,
};
ControlList();
ControlList(const ControlIdMap &idmap, const ControlValidator *validator = nullptr);
ControlList(const ControlInfoMap &infoMap, const ControlValidator *validator = nullptr);
@ -434,7 +368,7 @@ public:
std::size_t size() const { return controls_.size(); }
void clear() { controls_.clear(); }
void merge(const ControlList &source, MergePolicy policy = MergePolicy::KeepExisting);
void merge(const ControlList &source);
bool contains(unsigned int id) const;
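merge() keeps the v0.1.0 behaviour by default (KeepExisting) but can now overwrite. A small sketch using the standard Brightness control; the values are illustrative:

#include <libcamera/control_ids.h>
#include <libcamera/controls.h>

using namespace libcamera;

void mergeExample()
{
        ControlList a(controls::controls);
        ControlList b(controls::controls);

        a.set(controls::Brightness, 0.25f);
        b.set(controls::Brightness, -0.75f);

        /* The default policy keeps a's 0.25f... */
        a.merge(b);

        /* ...while OverwriteExisting takes b's -0.75f. */
        a.merge(b, ControlList::MergePolicy::OverwriteExisting);
}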
View file
@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
* Synchronization fence
* internal/fence.h - Synchronization fence
*/
#pragma once
View file
@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Google Inc.
*
* Formats
* formats.h - Formats
*
* This file is auto-generated. Do not edit.
*/
View file
@ -2,11 +2,12 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Frame buffer handling
* framebuffer.h - Frame buffer handling
*/
#pragma once
#include <assert.h>
#include <limits>
#include <memory>
#include <stdint.h>
@ -26,7 +27,6 @@ struct FrameMetadata {
FrameSuccess,
FrameError,
FrameCancelled,
FrameStartup,
};
struct Plane {
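FrameStartup lets pipelines flag frames delivered before the 3A loops have converged. A hedged sketch of a completion handler honouring it, assuming FrameBuffer::metadata():

#include <libcamera/framebuffer.h>

using namespace libcamera;

void processBuffer(FrameBuffer *buffer)
{
        const FrameMetadata &metadata = buffer->metadata();

        /* Startup frames carry valid data but unsettled AE/AWB; skip them. */
        if (metadata.status == FrameMetadata::FrameStartup)
                return;

        if (metadata.status != FrameMetadata::FrameSuccess)
                return; /* FrameError or FrameCancelled */

        /* ... consume the frame ... */
}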
View file
@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* FrameBuffer allocator
* framebuffer_allocator.h - FrameBuffer allocator
*/
#pragma once
View file
@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Geometry-related classes
* geometry.h - Geometry-related classes
*/
#pragma once
@ -11,6 +11,8 @@
#include <ostream>
#include <string>
#include <libcamera/base/compiler.h>
namespace libcamera {
class Rectangle;
@ -108,7 +110,7 @@ public:
return *this;
}
[[nodiscard]] constexpr Size alignedDownTo(unsigned int hAlignment,
__nodiscard constexpr Size alignedDownTo(unsigned int hAlignment,
unsigned int vAlignment) const
{
return {
@ -117,7 +119,7 @@ public:
};
}
[[nodiscard]] constexpr Size alignedUpTo(unsigned int hAlignment,
__nodiscard constexpr Size alignedUpTo(unsigned int hAlignment,
unsigned int vAlignment) const
{
return {
@ -126,7 +128,7 @@ public:
};
}
[[nodiscard]] constexpr Size boundedTo(const Size &bound) const
__nodiscard constexpr Size boundedTo(const Size &bound) const
{
return {
std::min(width, bound.width),
@ -134,7 +136,7 @@ public:
};
}
[[nodiscard]] constexpr Size expandedTo(const Size &expand) const
__nodiscard constexpr Size expandedTo(const Size &expand) const
{
return {
std::max(width, expand.width),
@ -142,7 +144,7 @@ public:
};
}
[[nodiscard]] constexpr Size grownBy(const Size &margins) const
__nodiscard constexpr Size grownBy(const Size &margins) const
{
return {
width + margins.width,
@ -150,7 +152,7 @@ public:
};
}
[[nodiscard]] constexpr Size shrunkBy(const Size &margins) const
__nodiscard constexpr Size shrunkBy(const Size &margins) const
{
return {
width > margins.width ? width - margins.width : 0,
@ -158,10 +160,10 @@ public:
};
}
[[nodiscard]] Size boundedToAspectRatio(const Size &ratio) const;
[[nodiscard]] Size expandedToAspectRatio(const Size &ratio) const;
__nodiscard Size boundedToAspectRatio(const Size &ratio) const;
__nodiscard Size expandedToAspectRatio(const Size &ratio) const;
[[nodiscard]] Rectangle centeredTo(const Point &center) const;
__nodiscard Rectangle centeredTo(const Point &center) const;
Size operator*(float factor) const;
Size operator/(float factor) const;
@ -260,15 +262,6 @@ public:
{
}
constexpr Rectangle(const Point &point1, const Point &point2)
: Rectangle(std::min(point1.x, point2.x), std::min(point1.y, point2.y),
static_cast<unsigned int>(std::max(point1.x, point2.x)) -
static_cast<unsigned int>(std::min(point1.x, point2.x)),
static_cast<unsigned int>(std::max(point1.y, point2.y)) -
static_cast<unsigned int>(std::min(point1.y, point2.y)))
{
}
int x;
int y;
unsigned int width;
@ -292,14 +285,11 @@ public:
Rectangle &scaleBy(const Size &numerator, const Size &denominator);
Rectangle &translateBy(const Point &point);
[[nodiscard]] Rectangle boundedTo(const Rectangle &bound) const;
[[nodiscard]] Rectangle enclosedIn(const Rectangle &boundary) const;
[[nodiscard]] Rectangle scaledBy(const Size &numerator,
__nodiscard Rectangle boundedTo(const Rectangle &bound) const;
__nodiscard Rectangle enclosedIn(const Rectangle &boundary) const;
__nodiscard Rectangle scaledBy(const Size &numerator,
const Size &denominator) const;
[[nodiscard]] Rectangle translatedBy(const Point &point) const;
Rectangle transformedBetween(const Rectangle &source,
const Rectangle &target) const;
__nodiscard Rectangle translatedBy(const Point &point) const;
};
bool operator==(const Rectangle &lhs, const Rectangle &rhs);
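The new two-point Rectangle constructor normalises its corners, so argument order is irrelevant, and the [[nodiscard]] helpers return new values instead of mutating. A compile-time sketch, assuming the constructors involved are constexpr as shown:

#include <libcamera/geometry.h>

using namespace libcamera;

/* Both rectangles are 200x150 with the top-left corner at (100, 50). */
constexpr Rectangle r1(Point(100, 200), Point(300, 50));
constexpr Rectangle r2(Point(300, 50), Point(100, 200));
static_assert(r1.x == r2.x && r1.y == r2.y);
static_assert(r1.width == 200 && r1.height == 150);

/* boundedTo() returns a new Size; ignoring the result would now warn. */
constexpr Size bounded = Size(1920, 1080).boundedTo(Size(1280, 720));
static_assert(bounded.width == 1280 && bounded.height == 720);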
View file
@ -2,7 +2,7 @@
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
* Bayer Pixel Format
* bayer_format.h - Bayer Pixel Format
*/
#pragma once
@ -34,8 +34,6 @@ public:
None = 0,
CSI2 = 1,
IPU3 = 2,
PISP1 = 3,
PISP2 = 4,
};
constexpr BayerFormat()
View file
@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Byte stream buffer
* byte_stream_buffer.h - Byte stream buffer
*/
#pragma once
View file
@ -2,7 +2,7 @@
/*
* Copyright (C) 2021, Google Inc.
*
* Camera private data
* camera.h - Camera private data
*/
#pragma once
@ -11,7 +11,6 @@
#include <list>
#include <memory>
#include <set>
#include <stdint.h>
#include <string>
#include <libcamera/base/class.h>
@ -33,7 +32,6 @@ public:
~Private();
PipelineHandler *pipe() { return pipe_.get(); }
const PipelineHandler *pipe() const { return pipe_.get(); }
std::list<Request *> queuedRequests_;
ControlInfoMap controlInfo_;
View file
@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Camera controls
* camera_controls.h - Camera controls
*/
#pragma once
View file
@ -2,12 +2,11 @@
/*
* Copyright (C) 2021, Google Inc.
*
* A camera lens controller
* camera_lens.h - A camera lens controller
*/
#pragma once
#include <memory>
#include <stdint.h>
#include <string>
#include <libcamera/base/class.h>
View file
@ -2,13 +2,14 @@
/*
* Copyright (C) 2023, Ideas on Board Oy.
*
* Camera manager private data
* camera_manager.h - Camera manager private data
*/
#pragma once
#include <libcamera/camera_manager.h>
#include <map>
#include <memory>
#include <sys/types.h>
#include <vector>
@ -18,14 +19,13 @@
#include <libcamera/base/thread.h>
#include <libcamera/base/thread_annotations.h>
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/process.h"
namespace libcamera {
class Camera;
class DeviceEnumerator;
class IPAManager;
class PipelineHandlerFactoryBase;
class CameraManager::Private : public Extensible::Private, public Thread
{
@ -38,15 +38,12 @@ public:
void addCamera(std::shared_ptr<Camera> camera) LIBCAMERA_TSA_EXCLUDES(mutex_);
void removeCamera(std::shared_ptr<Camera> camera) LIBCAMERA_TSA_EXCLUDES(mutex_);
IPAManager *ipaManager() const { return ipaManager_.get(); }
protected:
void run() override;
private:
int init();
void createPipelineHandlers();
void pipelineFactoryMatch(const PipelineHandlerFactoryBase *factory);
void cleanup() LIBCAMERA_TSA_EXCLUDES(mutex_);
/*
@ -64,7 +61,7 @@ private:
std::unique_ptr<DeviceEnumerator> enumerator_;
std::unique_ptr<IPAManager> ipaManager_;
IPAManager ipaManager_;
ProcessManager processManager_;
};
View file
@ -2,130 +2,115 @@
/*
* Copyright (C) 2019, Google Inc.
*
* A camera sensor
* camera_sensor.h - A camera sensor
*/
#pragma once
#include <memory>
#include <stdint.h>
#include <string>
#include <variant>
#include <vector>
#include <libcamera/base/class.h>
#include <libcamera/base/log.h>
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
#include <libcamera/geometry.h>
#include <libcamera/orientation.h>
#include <libcamera/transform.h>
#include "libcamera/internal/bayer_format.h"
#include "libcamera/internal/camera_sensor_properties.h"
#include <libcamera/ipa/core_ipa_interface.h>
#include "libcamera/internal/formats.h"
#include "libcamera/internal/v4l2_subdevice.h"
namespace libcamera {
class BayerFormat;
class CameraLens;
class MediaEntity;
class SensorConfiguration;
enum class Orientation;
struct CameraSensorProperties;
struct IPACameraSensorInfo;
class CameraSensor
class CameraSensor : protected Loggable
{
public:
virtual ~CameraSensor();
explicit CameraSensor(const MediaEntity *entity);
~CameraSensor();
virtual const std::string &model() const = 0;
virtual const std::string &id() const = 0;
int init();
virtual const MediaEntity *entity() const = 0;
virtual V4L2Subdevice *device() = 0;
const std::string &model() const { return model_; }
const std::string &id() const { return id_; }
const MediaEntity *entity() const { return entity_; }
const std::vector<unsigned int> &mbusCodes() const { return mbusCodes_; }
std::vector<Size> sizes(unsigned int mbusCode) const;
Size resolution() const;
const std::vector<controls::draft::TestPatternModeEnum> &testPatternModes() const
{
return testPatternModes_;
}
int setTestPatternMode(controls::draft::TestPatternModeEnum mode);
virtual CameraLens *focusLens() = 0;
V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
const Size &size) const;
int setFormat(V4L2SubdeviceFormat *format,
Transform transform = Transform::Identity);
int tryFormat(V4L2SubdeviceFormat *format) const;
virtual const std::vector<unsigned int> &mbusCodes() const = 0;
virtual std::vector<Size> sizes(unsigned int mbusCode) const = 0;
virtual Size resolution() const = 0;
const ControlInfoMap &controls() const;
ControlList getControls(const std::vector<uint32_t> &ids);
int setControls(ControlList *ctrls);
virtual V4L2SubdeviceFormat
getFormat(const std::vector<unsigned int> &mbusCodes,
const Size &size, const Size maxSize = Size()) const = 0;
virtual int setFormat(V4L2SubdeviceFormat *format,
Transform transform = Transform::Identity) = 0;
virtual int tryFormat(V4L2SubdeviceFormat *format) const = 0;
V4L2Subdevice *device() { return subdev_.get(); }
virtual int applyConfiguration(const SensorConfiguration &config,
Transform transform = Transform::Identity,
V4L2SubdeviceFormat *sensorFormat = nullptr) = 0;
const ControlList &properties() const { return properties_; }
int sensorInfo(IPACameraSensorInfo *info) const;
virtual V4L2Subdevice::Stream imageStream() const;
virtual std::optional<V4L2Subdevice::Stream> embeddedDataStream() const;
virtual V4L2SubdeviceFormat embeddedDataFormat() const;
virtual int setEmbeddedDataEnabled(bool enable);
void updateControlInfo();
virtual const ControlList &properties() const = 0;
virtual int sensorInfo(IPACameraSensorInfo *info) const = 0;
virtual Transform computeTransform(Orientation *orientation) const = 0;
virtual BayerFormat::Order bayerOrder(Transform t) const = 0;
CameraLens *focusLens() { return focusLens_.get(); }
virtual const ControlInfoMap &controls() const = 0;
virtual ControlList getControls(const std::vector<uint32_t> &ids) = 0;
virtual int setControls(ControlList *ctrls) = 0;
Transform validateTransform(Transform *transform) const;
virtual const std::vector<controls::draft::TestPatternModeEnum> &
testPatternModes() const = 0;
virtual int setTestPatternMode(controls::draft::TestPatternModeEnum mode) = 0;
virtual const CameraSensorProperties::SensorDelays &sensorDelays() = 0;
};
class CameraSensorFactoryBase
{
public:
CameraSensorFactoryBase(const char *name, int priority);
virtual ~CameraSensorFactoryBase() = default;
static std::unique_ptr<CameraSensor> create(MediaEntity *entity);
const std::string &name() const { return name_; }
int priority() const { return priority_; }
protected:
std::string logPrefix() const override;
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorFactoryBase)
LIBCAMERA_DISABLE_COPY(CameraSensor)
static std::vector<CameraSensorFactoryBase *> &factories();
int generateId();
int validateSensorDriver();
void initVimcDefaultProperties();
void initStaticProperties();
void initTestPatternModes();
int initProperties();
int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
int discoverAncillaryDevices();
static void registerFactory(CameraSensorFactoryBase *factory);
const MediaEntity *entity_;
std::unique_ptr<V4L2Subdevice> subdev_;
unsigned int pad_;
virtual std::variant<std::unique_ptr<CameraSensor>, int>
match(MediaEntity *entity) const = 0;
const CameraSensorProperties *staticProps_;
std::string name_;
int priority_;
std::string model_;
std::string id_;
V4L2Subdevice::Formats formats_;
std::vector<unsigned int> mbusCodes_;
std::vector<Size> sizes_;
std::vector<controls::draft::TestPatternModeEnum> testPatternModes_;
controls::draft::TestPatternModeEnum testPatternMode_;
Size pixelArraySize_;
Rectangle activeArea_;
const BayerFormat *bayerFormat_;
bool supportFlips_;
ControlList properties_;
std::unique_ptr<CameraLens> focusLens_;
};
template<typename _CameraSensor>
class CameraSensorFactory final : public CameraSensorFactoryBase
{
public:
CameraSensorFactory(const char *name, int priority)
: CameraSensorFactoryBase(name, priority)
{
}
private:
std::variant<std::unique_ptr<CameraSensor>, int>
match(MediaEntity *entity) const override
{
return _CameraSensor::match(entity);
}
};
#define REGISTER_CAMERA_SENSOR(sensor, priority) \
static CameraSensorFactory<sensor> global_##sensor##Factory{ #sensor, priority };
} /* namespace libcamera */
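The factory machinery turns sensor discovery into a priority-ordered chain: each factory's match() either produces a sensor or returns an error code, and the first success wins. Below is a self-contained toy model of that dispatch; it is deliberately simplified, e.g. real error handling distinguishes "no match" from fatal errors:

#include <cerrno>
#include <iostream>
#include <memory>
#include <string>
#include <variant>
#include <vector>

struct MediaEntity {
        std::string name;
};

struct Sensor {
        virtual ~Sensor() = default;
};

class Factory
{
public:
        explicit Factory(int priority)
                : priority_(priority)
        {
                /* Keep the registry sorted, highest priority first. */
                auto &list = registry();
                auto it = list.begin();
                while (it != list.end() && (*it)->priority_ >= priority)
                        ++it;
                list.insert(it, this);
        }

        virtual ~Factory() = default;

        static std::unique_ptr<Sensor> create(MediaEntity *entity)
        {
                for (Factory *factory : registry()) {
                        auto result = factory->match(entity);
                        if (auto *sensor = std::get_if<std::unique_ptr<Sensor>>(&result))
                                return std::move(*sensor);
                        /* Otherwise an error code: try the next factory. */
                }
                return nullptr;
        }

private:
        virtual std::variant<std::unique_ptr<Sensor>, int>
        match(MediaEntity *entity) const = 0;

        static std::vector<Factory *> &registry()
        {
                static std::vector<Factory *> list;
                return list;
        }

        int priority_;
};

/* Roughly what REGISTER_CAMERA_SENSOR(MySensor, 10) boils down to. */
class MySensorFactory : public Factory
{
public:
        MySensorFactory() : Factory(10) {}

private:
        std::variant<std::unique_ptr<Sensor>, int>
        match(MediaEntity *entity) const override
        {
                if (entity->name != "mysensor")
                        return -ENODEV;
                return std::make_unique<Sensor>();
        }
};

static MySensorFactory factory;

int main()
{
        MediaEntity entity{ "mysensor" };
        std::cout << (Factory::create(&entity) ? "matched" : "no sensor") << std::endl;
        return 0;
}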
View file
@ -2,13 +2,12 @@
/*
* Copyright (C) 2021, Google Inc.
*
* Database of camera sensor properties
* camera_sensor_properties.h - Database of camera sensor properties
*/
#pragma once
#include <map>
#include <stdint.h>
#include <string>
#include <libcamera/control_ids.h>
@ -17,18 +16,10 @@
namespace libcamera {
struct CameraSensorProperties {
struct SensorDelays {
uint8_t exposureDelay;
uint8_t gainDelay;
uint8_t vblankDelay;
uint8_t hblankDelay;
};
static const CameraSensorProperties *get(const std::string &sensor);
Size unitCellSize;
std::map<controls::draft::TestPatternModeEnum, int32_t> testPatternModes;
SensorDelays sensorDelays;
};
} /* namespace libcamera */
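SensorDelays records how many frames elapse before a value written to the sensor takes effect, per control type. A hypothetical database entry could look like the following; every value is illustrative, and the real entries live in camera_sensor_properties.cpp:

#include "libcamera/internal/camera_sensor_properties.h"

using namespace libcamera;

const CameraSensorProperties mySensorProperties = {
        .unitCellSize = { 1120, 1120 }, /* assumed to be in nanometres */
        .testPatternModes = {},
        .sensorDelays = {
                .exposureDelay = 2,
                .gainDelay = 1,
                .vblankDelay = 2,
                .hblankDelay = 2,
        },
};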
View file
@ -1,68 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Raspberry Pi Ltd
*
* Camera recovery algorithm
*/
#pragma once
#include <stdint.h>
namespace libcamera {
class ClockRecovery
{
public:
ClockRecovery();
void configure(unsigned int numSamples = 100, unsigned int maxJitter = 2000,
unsigned int minSamples = 10, unsigned int errorThreshold = 50000);
void reset();
void addSample();
void addSample(uint64_t input, uint64_t output);
uint64_t getOutput(uint64_t input);
private:
/* Approximate number of samples over which the model state persists. */
unsigned int numSamples_;
/* Remove any output jitter larger than this immediately. */
unsigned int maxJitter_;
/* Number of samples required before we start to use model estimates. */
unsigned int minSamples_;
/* Threshold above which we assume the wallclock has been reset. */
unsigned int errorThreshold_;
/* How many samples seen (up to numSamples_). */
unsigned int count_;
/* This gets subtracted from all input values, just to make the numbers easier. */
uint64_t inputBase_;
/* As above, for the output. */
uint64_t outputBase_;
/* The previous input sample. */
uint64_t lastInput_;
/* The previous output sample. */
uint64_t lastOutput_;
/* Average x value seen so far. */
double xAve_;
/* Average y value seen so far. */
double yAve_;
/* Average x^2 value seen so far. */
double x2Ave_;
/* Average x*y value seen so far. */
double xyAve_;
/*
* The latest estimate of linear parameters to derive the output clock
* from the input.
*/
double slope_;
double offset_;
/* Use this cumulative error to monitor for spontaneous clock updates. */
double error_;
};
} /* namespace libcamera */
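The averaged x, y, x*x and x*y terms are exactly what least squares needs: slope = (xyAve - xAve*yAve) / (x2Ave - xAve*xAve) and offset = yAve - slope*xAve. A self-contained sketch of that update rule; the windowing and constants are illustrative, not libcamera's exact tuning:

#include <cstdio>

/* Least-squares line fit over weighted running averages,
 * modelled on the terms ClockRecovery keeps. */
struct ClockFit {
        double xAve = 0, yAve = 0, x2Ave = 0, xyAve = 0;
        double slope = 1.0, offset = 0.0;
        unsigned int count = 0;

        void addSample(double x, double y, unsigned int window = 100)
        {
                /* Roughly a moving average over 'window' samples. */
                double w = count < window ? 1.0 / (count + 1) : 1.0 / window;
                xAve += (x - xAve) * w;
                yAve += (y - yAve) * w;
                x2Ave += (x * x - x2Ave) * w;
                xyAve += (x * y - xyAve) * w;
                count++;

                double var = x2Ave - xAve * xAve;
                if (var > 0.0) {
                        slope = (xyAve - xAve * yAve) / var;
                        offset = yAve - slope * xAve;
                }
        }

        double getOutput(double x) const { return slope * x + offset; }
};

int main()
{
        ClockFit fit;

        /* Wallclock ticks 1.00002x faster than the monotonic input. */
        for (int i = 0; i < 200; i++)
                fit.addSample(i * 1e6, i * 1e6 * 1.00002 + 12345.0);

        std::printf("slope %.6f offset %.1f\n", fit.slope, fit.offset);
        return 0;
}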
View file
@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Control (de)serializer
* control_serializer.h - Control (de)serializer
*/
#pragma once
View file
@ -2,7 +2,7 @@
/*
* Copyright (C) 2019, Google Inc.
*
* Control validator
* control_validator.h - Control validator
*/
#pragma once
View file
@ -3,7 +3,7 @@
* Copyright (C) 2020, Laurent Pinchart
* Copyright 2022 NXP
*
* Generic format converter interface
* converter.h - Generic format converter interface
*/
#pragma once
@ -14,11 +14,9 @@
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <libcamera/base/class.h>
#include <libcamera/base/flags.h>
#include <libcamera/base/signal.h>
#include <libcamera/geometry.h>
@ -28,25 +26,12 @@ namespace libcamera {
class FrameBuffer;
class MediaDevice;
class PixelFormat;
class Stream;
struct StreamConfiguration;
class Converter
{
public:
enum class Feature {
None = 0,
InputCrop = (1 << 0),
};
using Features = Flags<Feature>;
enum class Alignment {
Down = 0,
Up,
};
Converter(MediaDevice *media, Features features = Feature::None);
Converter(MediaDevice *media);
virtual ~Converter();
virtual int loadConfiguration(const std::string &filename) = 0;
@ -56,45 +41,25 @@ public:
virtual std::vector<PixelFormat> formats(PixelFormat input) = 0;
virtual SizeRange sizes(const Size &input) = 0;
virtual Size adjustInputSize(const PixelFormat &pixFmt,
const Size &size,
Alignment align = Alignment::Down) = 0;
virtual Size adjustOutputSize(const PixelFormat &pixFmt,
const Size &size,
Alignment align = Alignment::Down) = 0;
virtual std::tuple<unsigned int, unsigned int>
strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size) = 0;
virtual int validateOutput(StreamConfiguration *cfg, bool *adjusted,
Alignment align = Alignment::Down) = 0;
virtual int configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0;
virtual bool isConfigured(const Stream *stream) const = 0;
virtual int exportBuffers(const Stream *stream, unsigned int count,
virtual int exportBuffers(unsigned int output, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) = 0;
virtual int start() = 0;
virtual void stop() = 0;
virtual int queueBuffers(FrameBuffer *input,
const std::map<const Stream *, FrameBuffer *> &outputs) = 0;
virtual int setInputCrop(const Stream *stream, Rectangle *rect) = 0;
virtual std::pair<Rectangle, Rectangle> inputCropBounds() = 0;
virtual std::pair<Rectangle, Rectangle> inputCropBounds(const Stream *stream) = 0;
const std::map<unsigned int, FrameBuffer *> &outputs) = 0;
Signal<FrameBuffer *> inputBufferReady;
Signal<FrameBuffer *> outputBufferReady;
const std::string &deviceNode() const { return deviceNode_; }
Features features() const { return features_; }
protected:
Features features_;
private:
std::string deviceNode_;
};
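Relative to v0.1.0, the interface is reworked around Stream pointers and gains feature flags such as InputCrop. A hedged sketch of driving an implementation; error handling is trimmed and the crop rectangle is illustrative:

#include <functional>

#include <libcamera/stream.h>

#include "libcamera/internal/converter.h"

using namespace libcamera;

int runConverter(Converter *converter, const StreamConfiguration &inputCfg,
                 StreamConfiguration &outputCfg, const Stream *stream,
                 FrameBuffer *input, FrameBuffer *output)
{
        int ret = converter->configure(inputCfg, { std::ref(outputCfg) });
        if (ret)
                return ret;

        if (!!(converter->features() & Converter::Feature::InputCrop)) {
                Rectangle crop(0, 0, 640, 480); /* illustrative */
                converter->setInputCrop(stream, &crop);
        }

        ret = converter->start();
        if (ret)
                return ret;

        converter->queueBuffers(input, { { stream, output } });

        /* Completion is reported via inputBufferReady / outputBufferReady. */
        converter->stop();
        return 0;
}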
View file
@ -3,7 +3,7 @@
* Copyright (C) 2020, Laurent Pinchart
* Copyright 2022 NXP
*
* V4l2 M2M Format converter interface
* converter_v4l2_m2m.h - V4l2 M2M Format converter interface
*/
#pragma once
@ -28,9 +28,7 @@ class FrameBuffer;
class MediaDevice;
class Size;
class SizeRange;
class Stream;
struct StreamConfiguration;
class Rectangle;
class V4L2M2MDevice;
class V4L2M2MConverter : public Converter
@ -38,45 +36,31 @@ class V4L2M2MConverter : public Converter
public:
V4L2M2MConverter(MediaDevice *media);
int loadConfiguration([[maybe_unused]] const std::string &filename) override { return 0; }
bool isValid() const override { return m2m_ != nullptr; }
int loadConfiguration([[maybe_unused]] const std::string &filename) { return 0; }
bool isValid() const { return m2m_ != nullptr; }
std::vector<PixelFormat> formats(PixelFormat input) override;
SizeRange sizes(const Size &input) override;
std::vector<PixelFormat> formats(PixelFormat input);
SizeRange sizes(const Size &input);
std::tuple<unsigned int, unsigned int>
strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size) override;
Size adjustInputSize(const PixelFormat &pixFmt,
const Size &size, Alignment align = Alignment::Down) override;
Size adjustOutputSize(const PixelFormat &pixFmt,
const Size &size, Alignment align = Alignment::Down) override;
strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size);
int configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>>
&outputCfg) override;
bool isConfigured(const Stream *stream) const override;
int exportBuffers(const Stream *stream, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfg);
int exportBuffers(unsigned int output, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers);
int start() override;
void stop() override;
int validateOutput(StreamConfiguration *cfg, bool *adjusted,
Alignment align = Alignment::Down) override;
int start();
void stop();
int queueBuffers(FrameBuffer *input,
const std::map<const Stream *, FrameBuffer *> &outputs) override;
int setInputCrop(const Stream *stream, Rectangle *rect) override;
std::pair<Rectangle, Rectangle> inputCropBounds() override { return inputCropBounds_; }
std::pair<Rectangle, Rectangle> inputCropBounds(const Stream *stream) override;
const std::map<unsigned int, FrameBuffer *> &outputs);
private:
class V4L2M2MStream : protected Loggable
class Stream : protected Loggable
{
public:
V4L2M2MStream(V4L2M2MConverter *converter, const Stream *stream);
Stream(V4L2M2MConverter *converter, unsigned int index);
bool isValid() const { return m2m_ != nullptr; }
@ -90,11 +74,6 @@ private:
int queueBuffers(FrameBuffer *input, FrameBuffer *output);
int setInputSelection(unsigned int target, Rectangle *rect);
int getInputSelection(unsigned int target, Rectangle *rect);
std::pair<Rectangle, Rectangle> inputCropBounds();
protected:
std::string logPrefix() const override;
@ -103,23 +82,17 @@ private:
void outputBufferReady(FrameBuffer *buffer);
V4L2M2MConverter *converter_;
const Stream *stream_;
unsigned int index_;
std::unique_ptr<V4L2M2MDevice> m2m_;
unsigned int inputBufferCount_;
unsigned int outputBufferCount_;
std::pair<Rectangle, Rectangle> inputCropBounds_;
};
Size adjustSizes(const Size &size, const std::vector<SizeRange> &ranges,
Alignment align);
std::unique_ptr<V4L2M2MDevice> m2m_;
std::map<const Stream *, std::unique_ptr<V4L2M2MStream>> streams_;
std::vector<Stream> streams_;
std::map<FrameBuffer *, unsigned int> queue_;
std::pair<Rectangle, Rectangle> inputCropBounds_;
};
} /* namespace libcamera */
View file
@ -1,46 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Google Inc.
*
* Debug metadata helpers
*/
#pragma once
#include <libcamera/control_ids.h>
namespace libcamera {
class DebugMetadata
{
public:
DebugMetadata() = default;
void enableByControl(const ControlList &controls);
void enable(bool enable = true);
void setParent(DebugMetadata *parent);
void moveEntries(ControlList &list);
template<typename T, typename V>
void set(const Control<T> &ctrl, const V &value)
{
if (parent_) {
parent_->set(ctrl, value);
return;
}
if (!enabled_)
return;
cache_.set(ctrl, value);
}
void set(unsigned int id, const ControlValue &value);
private:
bool enabled_ = false;
DebugMetadata *parent_ = nullptr;
ControlList cache_;
};
} /* namespace libcamera */
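DebugMetadata caches values set by algorithms and hands them over to request metadata in one go, with optional forwarding to a parent instance. A hedged usage sketch; the control written is illustrative:

#include <libcamera/control_ids.h>

#include "libcamera/internal/debug_metadata.h"

using namespace libcamera;

void example(ControlList &requestMetadata)
{
        DebugMetadata parent;
        DebugMetadata child;

        child.setParent(&parent);
        parent.enable();

        /* Forwarded to the parent because one is set, and cached there. */
        child.set(controls::ExposureTime, 10000);

        /* Hand all cached entries over to the request metadata. */
        parent.moveEntries(requestMetadata);
}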
Some files were not shown because too many files have changed in this diff.