libcamera: shaders: Move GL shader programs to src/libcamera/assets/shader

Move the GL shaders to src/libcamera/assets/shader so that they can be
reused inside the SoftISP.

Signed-off-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
Author: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
Date:   2024-01-24 15:06:00 +00:00
Commit: 367b29199b (parent c2a68a2e44)

10 changed files with 43 additions and 43 deletions

@@ -0,0 +1,22 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Laurent Pinchart
*
* RGB.frag - Fragment shader code for RGB formats
*/
#ifdef GL_ES
precision mediump float;
#endif
varying vec2 textureOut;
uniform sampler2D tex_y;
void main(void)
{
	vec3 rgb;
	rgb = texture2D(tex_y, textureOut).RGB_PATTERN;
	gl_FragColor = vec4(rgb, 1.0);
}

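RGB_PATTERN is not valid GLSL by itself: it is a preprocessor symbol the host is expected to define at shader-compile time with a swizzle matching the pixel format. A minimal sketch of that substitution, with hypothetical names (buildRgbFragmentSource and the swizzle argument are illustrative, not the actual libcamera/qcam API):

```cpp
#include <string>

/*
 * Hypothetical helper: prepend the RGB_PATTERN define to the shader body
 * before handing it to the GL shader compiler. The swizzle would be e.g.
 * "rgb" for RGB-ordered formats or "bgr" for BGR-ordered ones.
 */
std::string buildRgbFragmentSource(const std::string &shaderBody,
				   const std::string &swizzle)
{
	return "#define RGB_PATTERN " + swizzle + "\n" + shaderBody;
}
```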
@@ -0,0 +1,42 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Linaro
*
* YUV_2_planes.frag - Fragment shader code for NV12, NV16 and NV24 formats
*/
#ifdef GL_ES
precision mediump float;
#endif
varying vec2 textureOut;
uniform sampler2D tex_y;
uniform sampler2D tex_u;
const mat3 yuv2rgb_matrix = mat3(
	YUV2RGB_MATRIX
);
const vec3 yuv2rgb_offset = vec3(
	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
);
void main(void)
{
	vec3 yuv;
	yuv.x = texture2D(tex_y, textureOut).r;
#if defined(YUV_PATTERN_UV)
	yuv.y = texture2D(tex_u, textureOut).r;
	yuv.z = texture2D(tex_u, textureOut).a;
#elif defined(YUV_PATTERN_VU)
	yuv.y = texture2D(tex_u, textureOut).a;
	yuv.z = texture2D(tex_u, textureOut).r;
#else
#error Invalid pattern
#endif
	vec3 rgb = yuv2rgb_matrix * (yuv - yuv2rgb_offset);
	gl_FragColor = vec4(rgb, 1.0);
}

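YUV2RGB_MATRIX, YUV2RGB_Y_OFFSET and the YUV_PATTERN_* selector are likewise filled in by the host when the shader is compiled. Purely as an illustration (the real caller may select other colour spaces and quantisation ranges), BT.601 limited-range coefficients for an NV12-style U-then-V chroma plane could be injected as below; note that the mat3() constructor consumes the nine values column by column, so each quoted triple is one matrix column:

```cpp
/*
 * Illustrative defines only: BT.601 limited-range YUV to RGB, assuming the
 * YUV_PATTERN_UV (U-then-V) chroma layout. With yuv2rgb_offset set to
 * (16/255, 128/255, 128/255) this yields e.g. R = 1.164 Y' + 1.596 V'.
 */
static const char *yuv2PlanesDefines =
	"#define YUV_PATTERN_UV\n"
	"#define YUV2RGB_Y_OFFSET 16.0\n"
	"#define YUV2RGB_MATRIX "
	"1.164,  1.164, 1.164, "   /* column 0: multiplies Y */
	"0.000, -0.392, 2.017, "   /* column 1: multiplies U */
	"1.596, -0.813, 0.000\n";  /* column 2: multiplies V */
```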
@@ -0,0 +1,36 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Linaro
*
* YUV_3_planes_UV.frag - Fragment shader code for YUV420 format
*/
#ifdef GL_ES
precision mediump float;
#endif
varying vec2 textureOut;
uniform sampler2D tex_y;
uniform sampler2D tex_u;
uniform sampler2D tex_v;
const mat3 yuv2rgb_matrix = mat3(
	YUV2RGB_MATRIX
);
const vec3 yuv2rgb_offset = vec3(
	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
);
void main(void)
{
	vec3 yuv;
	yuv.x = texture2D(tex_y, textureOut).r;
	yuv.y = texture2D(tex_u, textureOut).r;
	yuv.z = texture2D(tex_v, textureOut).r;
	vec3 rgb = yuv2rgb_matrix * (yuv - yuv2rgb_offset);
	gl_FragColor = vec4(rgb, 1.0);
}

@@ -0,0 +1,83 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
* YUV_packed.frag - Fragment shader code for YUYV packed formats
*/
#ifdef GL_ES
precision mediump float;
#endif
varying vec2 textureOut;
uniform sampler2D tex_y;
uniform vec2 tex_step;
const mat3 yuv2rgb_matrix = mat3(
	YUV2RGB_MATRIX
);
const vec3 yuv2rgb_offset = vec3(
	YUV2RGB_Y_OFFSET / 255.0, 128.0 / 255.0, 128.0 / 255.0
);
void main(void)
{
	/*
	 * The sampler won't interpolate the texture correctly along the X axis,
	 * as each RGBA pixel effectively stores two pixels. We thus need to
	 * interpolate manually.
	 *
	 * In integer texture coordinates, the Y values are laid out in the
	 * texture memory as follows:
	 *
	 * ...| Y  U  Y  V | Y  U  Y  V | Y  U  Y  V |...
	 * ...| R  G  B  A | R  G  B  A | R  G  B  A |...
	 *      ^     ^      ^     ^      ^     ^
	 *      |     |      |     |      |     |
	 *     n-1  n-0.5    n   n+0.5   n+1  n+1.5
	 *
	 * For a texture location x in the interval [n, n+1[, sample the left
	 * and right pixels at n and n+1, and interpolate them with
	 *
	 *	left.r * (1 - a) + left.b * a	if fract(x) < 0.5
	 *	left.b * (1 - a) + right.r * a	if fract(x) >= 0.5
	 *
	 * with a = fract(x * 2) which can also be written
	 *
	 *	a = fract(x) * 2	if fract(x) < 0.5
	 *	a = fract(x) * 2 - 1	if fract(x) >= 0.5
	 */
	vec2 pos = textureOut;
	float f_x = fract(pos.x / tex_step.x);
	vec4 left = texture2D(tex_y, vec2(pos.x - f_x * tex_step.x, pos.y));
	vec4 right = texture2D(tex_y, vec2(pos.x + (1.0 - f_x) * tex_step.x, pos.y));
#if defined(YUV_PATTERN_UYVY)
	float y_left = mix(left.g, left.a, f_x * 2.0);
	float y_right = mix(left.a, right.g, f_x * 2.0 - 1.0);
	vec2 uv = mix(left.rb, right.rb, f_x);
#elif defined(YUV_PATTERN_VYUY)
	float y_left = mix(left.g, left.a, f_x * 2.0);
	float y_right = mix(left.a, right.g, f_x * 2.0 - 1.0);
	vec2 uv = mix(left.br, right.br, f_x);
#elif defined(YUV_PATTERN_YUYV)
	float y_left = mix(left.r, left.b, f_x * 2.0);
	float y_right = mix(left.b, right.r, f_x * 2.0 - 1.0);
	vec2 uv = mix(left.ga, right.ga, f_x);
#elif defined(YUV_PATTERN_YVYU)
	float y_left = mix(left.r, left.b, f_x * 2.0);
	float y_right = mix(left.b, right.r, f_x * 2.0 - 1.0);
	vec2 uv = mix(left.ag, right.ag, f_x);
#else
#error Invalid pattern
#endif
	float y = mix(y_left, y_right, step(0.5, f_x));
	vec3 rgb = yuv2rgb_matrix * (vec3(y, uv) - yuv2rgb_offset);
	gl_FragColor = vec4(rgb, 1.0);
}

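The interpolation rule described in the comment block above can be sanity-checked on the CPU. The sketch below (illustrative values, not part of the patch) reproduces the YUYV branch for two neighbouring texels and one fract(x) value in each half of the interval:

```cpp
#include <cstdio>

/* Same semantics as GLSL mix(): linear interpolation between a and b. */
static float mixf(float a, float b, float t)
{
	return a * (1.0f - t) + b * t;
}

int main()
{
	/* Two adjacent YUYV texels, stored as RGBA = (Y0, U, Y1, V). */
	const float left[4]  = { 0.20f, 0.50f, 0.40f, 0.50f };
	const float right[4] = { 0.60f, 0.50f, 0.80f, 0.50f };

	const float fxs[] = { 0.25f, 0.75f };
	for (float fx : fxs) {
		float yLeft = mixf(left[0], left[2], fx * 2.0f);
		float yRight = mixf(left[2], right[0], fx * 2.0f - 1.0f);
		/* step(0.5, f_x) selects y_right once fract(x) reaches 0.5. */
		float y = fx < 0.5f ? yLeft : yRight;
		printf("fract(x) = %.2f -> Y = %.3f\n", fx, y);
	}

	return 0;
}
```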
@@ -0,0 +1,216 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
* Based on the code from http://jgt.akpeters.com/papers/McGuire08/
*
* Efficient, High-Quality Bayer Demosaic Filtering on GPUs
*
* Morgan McGuire
*
* This paper appears in issue Volume 13, Number 4.
* ---------------------------------------------------------
* Copyright (c) 2008, Morgan McGuire. All rights reserved.
*
*
* Modified by Linaro Ltd for 10/12-bit packed vs 8-bit raw Bayer format,
* and for simpler demosaic algorithm.
* Copyright (C) 2020, Linaro
*
* bayer_1x_packed.frag - Fragment shader code for raw Bayer 10-bit and 12-bit
* packed formats
*/
#ifdef GL_ES
precision mediump float;
#endif
/*
* These constants are used to select the bytes containing the HS part of
* the pixel value:
* BPP - bytes per pixel,
* THRESHOLD_L = fract(BPP) * 0.5 + 0.02
* THRESHOLD_H = 1.0 - fract(BPP) * 1.5 + 0.02
 * Let X be the x coordinate in the texture, measured in bytes (so that the
 * range is from 0 to (stride_-1)), aligned to the nearest pixel.
* E.g. for RAW10P:
* -------------+-------------------+-------------------+--
* pixel No | 0 1 2 3 | 4 5 6 7 | ...
* -------------+-------------------+-------------------+--
* byte offset | 0 1 2 3 4 | 5 6 7 8 9 | ...
* -------------+-------------------+-------------------+--
* X | 0.0 1.25 2.5 3.75 | 5.0 6.25 7.5 8.75 | ...
* -------------+-------------------+-------------------+--
* If fract(X) < THRESHOLD_L then the previous byte contains the LS
* bits of the pixel values and needs to be skipped.
* If fract(X) > THRESHOLD_H then the next byte contains the LS bits
* of the pixel values and needs to be skipped.
*/
#if defined(RAW10P)
#define BPP 1.25
#define THRESHOLD_L 0.14
#define THRESHOLD_H 0.64
#elif defined(RAW12P)
#define BPP 1.5
#define THRESHOLD_L 0.27
#define THRESHOLD_H 0.27
#else
#error Invalid raw format
#endif
varying vec2 textureOut;
/* the texture size in pixels */
uniform vec2 tex_size;
uniform vec2 tex_step;
uniform vec2 tex_bayer_first_red;
uniform sampler2D tex_y;
void main(void)
{
	vec3 rgb;
	/*
	 * center_bytes holds the coordinates of the MS byte of the pixel
	 * being sampled, on the [0, stride-1] / [0, height-1] range.
	 * center_pixel holds the coordinates of the pixel being sampled,
	 * on the [0, width-1] / [0, height-1] range.
	 */
	vec2 center_bytes;
	vec2 center_pixel;
	/*
	 * x- and y-positions of the adjacent pixels on the [0, 1] range.
	 */
	vec2 xcoords;
	vec2 ycoords;
	/*
	 * The coordinates passed to the shader in textureOut may point
	 * to a place in between the pixels if the texture format doesn't
	 * match the image format. In particular, MIPI packed raw Bayer
	 * formats don't have a matching texture format.
	 * In this case align the coordinates to the nearest pixel on the
	 * left by hand.
	 */
	center_pixel = floor(textureOut * tex_size);
	center_bytes.y = center_pixel.y;
	/*
	 * Add a small number (a few mantissa's LSBs) to avoid float
	 * representation issues. Maybe paranoid.
	 */
	center_bytes.x = BPP * center_pixel.x + 0.02;
	float fract_x = fract(center_bytes.x);
	/*
	 * The below floor() call ensures that center_bytes.x points
	 * at one of the bytes representing the 8 higher bits of
	 * the pixel value, not at the byte containing the LS bits
	 * of the group of the pixels.
	 */
	center_bytes.x = floor(center_bytes.x);
	center_bytes *= tex_step;
	xcoords = center_bytes.x + vec2(-tex_step.x, tex_step.x);
	ycoords = center_bytes.y + vec2(-tex_step.y, tex_step.y);
	/*
	 * If xcoords[0] points at the byte containing the LS bits
	 * of the previous group of the pixels, move xcoords[0] one
	 * byte back.
	 */
	xcoords[0] += (fract_x < THRESHOLD_L) ? -tex_step.x : 0.0;
	/*
	 * If xcoords[1] points at the byte containing the LS bits
	 * of the current group of the pixels, move xcoords[1] one
	 * byte forward.
	 */
	xcoords[1] += (fract_x > THRESHOLD_H) ? tex_step.x : 0.0;
	vec2 alternate = mod(center_pixel.xy + tex_bayer_first_red, 2.0);
	bool even_col = alternate.x < 1.0;
	bool even_row = alternate.y < 1.0;
	/*
	 * We need to sample the central pixel and the ones with offset
	 * of -1 to +1 pixel in both X and Y directions. Let's name these
	 * pixels as below, where C is the central pixel:
	 *
	 *   +----+----+----+----+
	 *   | \ x|    |    |    |
	 *   |y \ | -1 |  0 | +1 |
	 *   +----+----+----+----+
	 *   | +1 | D2 | A1 | D3 |
	 *   +----+----+----+----+
	 *   |  0 | B0 |  C | B1 |
	 *   +----+----+----+----+
	 *   | -1 | D0 | A0 | D1 |
	 *   +----+----+----+----+
	 *
	 * In the below equations (0,-1).r means "r component of the texel
	 * shifted by -tex_step.y from the center_bytes one" etc.
	 *
	 * In the even row / even column (EE) case the colour values are:
	 *   R = C = (0,0).r,
	 *   G = (A0 + A1 + B0 + B1) / 4.0 =
	 *       ( (0,-1).r + (0,1).r + (-1,0).r + (1,0).r ) / 4.0,
	 *   B = (D0 + D1 + D2 + D3) / 4.0 =
	 *       ( (-1,-1).r + (1,-1).r + (-1,1).r + (1,1).r ) / 4.0
	 *
	 * For even row / odd column (EO):
	 *   R = (B0 + B1) / 2.0 = ( (-1,0).r + (1,0).r ) / 2.0,
	 *   G = C = (0,0).r,
	 *   B = (A0 + A1) / 2.0 = ( (0,-1).r + (0,1).r ) / 2.0
	 *
	 * For odd row / even column (OE):
	 *   R = (A0 + A1) / 2.0 = ( (0,-1).r + (0,1).r ) / 2.0,
	 *   G = C = (0,0).r,
	 *   B = (B0 + B1) / 2.0 = ( (-1,0).r + (1,0).r ) / 2.0
	 *
	 * For odd row / odd column (OO):
	 *   R = (D0 + D1 + D2 + D3) / 4.0 =
	 *       ( (-1,-1).r + (1,-1).r + (-1,1).r + (1,1).r ) / 4.0,
	 *   G = (A0 + A1 + B0 + B1) / 4.0 =
	 *       ( (0,-1).r + (0,1).r + (-1,0).r + (1,0).r ) / 4.0,
	 *   B = C = (0,0).r
	 */
	/*
	 * Fetch the values and precalculate the terms:
	 *   patterns.x = (A0 + A1) / 2.0
	 *   patterns.y = (B0 + B1) / 2.0
	 *   patterns.z = (A0 + A1 + B0 + B1) / 4.0
	 *   patterns.w = (D0 + D1 + D2 + D3) / 4.0
	 */
#define fetch(x, y) texture2D(tex_y, vec2(x, y)).r
	float C = texture2D(tex_y, center_bytes).r;
	vec4 patterns = vec4(
		fetch(center_bytes.x, ycoords[0]),	/* A0: (0,-1) */
		fetch(xcoords[0], center_bytes.y),	/* B0: (-1,0) */
		fetch(xcoords[0], ycoords[0]),		/* D0: (-1,-1) */
		fetch(xcoords[1], ycoords[0]));		/* D1: (1,-1) */
	vec4 temp = vec4(
		fetch(center_bytes.x, ycoords[1]),	/* A1: (0,1) */
		fetch(xcoords[1], center_bytes.y),	/* B1: (1,0) */
		fetch(xcoords[1], ycoords[1]),		/* D3: (1,1) */
		fetch(xcoords[0], ycoords[1]));		/* D2: (-1,1) */
	patterns = (patterns + temp) * 0.5;
	/* .x = (A0 + A1) / 2.0, .y = (B0 + B1) / 2.0 */
	/* .z = (D0 + D3) / 2.0, .w = (D1 + D2) / 2.0 */
	patterns.w = (patterns.z + patterns.w) * 0.5;
	patterns.z = (patterns.x + patterns.y) * 0.5;
	rgb = even_col ?
		(even_row ?
			vec3(C, patterns.zw) :
			vec3(patterns.x, C, patterns.y)) :
		(even_row ?
			vec3(patterns.y, C, patterns.x) :
			vec3(patterns.wz, C));
	gl_FragColor = vec4(rgb, 1.0);
}

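The byte-offset arithmetic in the threshold comment near the top of this shader can also be checked outside GLSL. A small sketch (illustrative only, the names are made up) walks the first pixels of a row for both packed formats and reports which neighbouring byte holds the low-order bits and therefore has to be skipped:

```cpp
#include <cmath>
#include <cstdio>

/* Mirror of the shader constants: bytes per pixel and the two thresholds. */
static void walkRow(const char *name, float bpp, float thL, float thH)
{
	printf("%s:\n", name);
	for (int pixel = 0; pixel < 8; pixel++) {
		/* center_bytes.x before the floor() call, as in the shader. */
		float x = bpp * pixel + 0.02f;
		float fr = x - std::floor(x);
		const char *note = fr < thL ? " (LS bits in previous byte)"
				 : fr > thH ? " (LS bits in next byte)" : "";
		printf("  pixel %d -> MS byte %.0f%s\n", pixel, std::floor(x), note);
	}
}

int main()
{
	walkRow("RAW10P", 1.25f, 0.14f, 0.64f);
	walkRow("RAW12P", 1.50f, 0.27f, 0.27f);
	return 0;
}
```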
@@ -0,0 +1,107 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
From http://jgt.akpeters.com/papers/McGuire08/
Efficient, High-Quality Bayer Demosaic Filtering on GPUs
Morgan McGuire
This paper appears in issue Volume 13, Number 4.
---------------------------------------------------------
Copyright (c) 2008, Morgan McGuire. All rights reserved.
Modified by Linaro Ltd to integrate it into libcamera.
Copyright (C) 2021, Linaro
*/
//Pixel Shader
#ifdef GL_ES
precision mediump float;
#endif
/** Monochrome RGBA or GL_LUMINANCE Bayer encoded texture.*/
uniform sampler2D tex_y;
varying vec4 center;
varying vec4 yCoord;
varying vec4 xCoord;
void main(void) {
#define fetch(x, y) texture2D(tex_y, vec2(x, y)).r
	float C = texture2D(tex_y, center.xy).r; // ( 0, 0)
	const vec4 kC = vec4( 4.0, 6.0, 5.0, 5.0) / 8.0;
	// Determine which of four types of pixels we are on.
	vec2 alternate = mod(floor(center.zw), 2.0);
	vec4 Dvec = vec4(
		fetch(xCoord[1], yCoord[1]),  // (-1,-1)
		fetch(xCoord[1], yCoord[2]),  // (-1, 1)
		fetch(xCoord[2], yCoord[1]),  // ( 1,-1)
		fetch(xCoord[2], yCoord[2])); // ( 1, 1)
	vec4 PATTERN = (kC.xyz * C).xyzz;
	// Can also be a dot product with (1,1,1,1) on hardware where that is
	// specially optimized.
	// Equivalent to: D = Dvec[0] + Dvec[1] + Dvec[2] + Dvec[3];
	Dvec.xy += Dvec.zw;
	Dvec.x += Dvec.y;
	vec4 value = vec4(
		fetch(center.x, yCoord[0]),  // ( 0,-2)
		fetch(center.x, yCoord[1]),  // ( 0,-1)
		fetch(xCoord[0], center.y),  // (-2, 0)
		fetch(xCoord[1], center.y)); // (-1, 0)
	vec4 temp = vec4(
		fetch(center.x, yCoord[3]),  // ( 0, 2)
		fetch(center.x, yCoord[2]),  // ( 0, 1)
		fetch(xCoord[3], center.y),  // ( 2, 0)
		fetch(xCoord[2], center.y)); // ( 1, 0)
	// Even the simplest compilers should be able to constant-fold these to
	// avoid the division.
	// Note that on scalar processors these constants force computation of some
	// identical products twice.
	const vec4 kA = vec4(-1.0, -1.5, 0.5, -1.0) / 8.0;
	const vec4 kB = vec4( 2.0, 0.0, 0.0, 4.0) / 8.0;
	const vec4 kD = vec4( 0.0, 2.0, -1.0, -1.0) / 8.0;
	// Conserve constant registers and take advantage of free swizzle on load
#define kE (kA.xywz)
#define kF (kB.xywz)
	value += temp;
	// There are five filter patterns (identity, cross, checker,
	// theta, phi). Precompute the terms from all of them and then
	// use swizzles to assign to color channels.
	//
	// Channel  Matches
	//   x      cross   (e.g., EE G)
	//   y      checker (e.g., EE B)
	//   z      theta   (e.g., EO R)
	//   w      phi     (e.g., EO R)
#define A (value[0])
#define B (value[1])
#define D (Dvec.x)
#define E (value[2])
#define F (value[3])
	// Avoid zero elements. On a scalar processor this saves two MADDs
	// and it has no effect on a vector processor.
	PATTERN.yzw += (kD.yz * D).xyy;
	PATTERN += (kA.xyz * A).xyzx + (kE.xyw * E).xyxz;
	PATTERN.xw += kB.xw * B;
	PATTERN.xz += kF.xz * F;
	gl_FragColor.rgb = (alternate.y == 0.0) ?
		((alternate.x == 0.0) ?
			vec3(C, PATTERN.xy) :
			vec3(PATTERN.z, C, PATTERN.w)) :
		((alternate.x == 0.0) ?
			vec3(PATTERN.w, C, PATTERN.z) :
			vec3(PATTERN.yx, C));
}

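Both Bayer shaders decide which colour sits at the sampled site from `alternate`, i.e. from the pixel position plus tex_bayer_first_red modulo 2. The same mapping written out on the CPU, for reference only (the enum and function names are illustrative, not part of the patch):

```cpp
/* Which colour does the Bayer site at (x, y) carry? */
enum class CfaColour { Red, GreenOnRedRow, GreenOnBlueRow, Blue };

static CfaColour bayerSite(unsigned int x, unsigned int y,
			   unsigned int firstRedX, unsigned int firstRedY)
{
	/* Mirrors alternate = mod(position + tex_bayer_first_red, 2.0). */
	bool evenCol = (x + firstRedX) % 2 == 0;
	bool evenRow = (y + firstRedY) % 2 == 0;

	if (evenRow)
		return evenCol ? CfaColour::Red : CfaColour::GreenOnRedRow;

	return evenCol ? CfaColour::GreenOnBlueRow : CfaColour::Blue;
}
```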
@@ -0,0 +1,53 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
From http://jgt.akpeters.com/papers/McGuire08/
Efficient, High-Quality Bayer Demosaic Filtering on GPUs
Morgan McGuire
This paper appears in issue Volume 13, Number 4.
---------------------------------------------------------
Copyright (c) 2008, Morgan McGuire. All rights reserved.
Modified by Linaro Ltd to integrate it into libcamera.
Copyright (C) 2021, Linaro
*/
//Vertex Shader
attribute vec4 vertexIn;
attribute vec2 textureIn;
uniform mat4 proj_matrix;
uniform vec2 tex_size; /* The texture size in pixels */
uniform vec2 tex_step;
/** Pixel position of the first red pixel in the */
/** Bayer pattern. [{0,1}, {0, 1}]*/
uniform vec2 tex_bayer_first_red;
/** .xy = Pixel being sampled in the fragment shader on the range [0, 1]
.zw = ...on the range [0, sourceSize], offset by firstRed */
varying vec4 center;
/** center.x + (-2/w, -1/w, 1/w, 2/w); These are the x-positions */
/** of the adjacent pixels.*/
varying vec4 xCoord;
/** center.y + (-2/h, -1/h, 1/h, 2/h); These are the y-positions */
/** of the adjacent pixels.*/
varying vec4 yCoord;
void main(void) {
	center.xy = textureIn;
	center.zw = textureIn * tex_size + tex_bayer_first_red;
	xCoord = center.x + vec4(-2.0 * tex_step.x,
				 -tex_step.x, tex_step.x, 2.0 * tex_step.x);
	yCoord = center.y + vec4(-2.0 * tex_step.y,
				 -tex_step.y, tex_step.y, 2.0 * tex_step.y);
	gl_Position = proj_matrix * vertexIn;
}

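tex_size and tex_step are uniforms supplied by the host; given the comments above, tex_step is the size of one texel in normalised [0, 1] texture coordinates. One plausible host-side computation, with illustrative names rather than the actual API:

```cpp
struct TexelGeometry {
	float size[2];	/* tex_size: texture size in pixels */
	float step[2];	/* tex_step: one texel in [0, 1] coordinates */
};

static TexelGeometry texelGeometry(unsigned int width, unsigned int height)
{
	TexelGeometry g;
	g.size[0] = static_cast<float>(width);
	g.size[1] = static_cast<float>(height);
	g.step[0] = 1.0f / g.size[0];
	g.step[1] = 1.0f / g.size[1];
	return g;
}
```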
@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Linaro
*
* identity.vert - Identity vertex shader for pixel format conversion
*/
attribute vec4 vertexIn;
attribute vec2 textureIn;
varying vec2 textureOut;
uniform mat4 proj_matrix;
uniform float stride_factor;
void main(void)
{
	gl_Position = proj_matrix * vertexIn;
	textureOut = vec2(textureIn.x * stride_factor, textureIn.y);
}
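stride_factor compensates for textures that are allocated stride-wide while only the first width pixels of each line carry image data. A minimal sketch of one way to derive it, assuming such a stride-wide texture (the helper name is hypothetical):

```cpp
/*
 * Hypothetical helper: fraction of each texture row that contains visible
 * pixels, used to scale the horizontal texture coordinate.
 */
static float strideFactor(unsigned int widthPixels, unsigned int strideBytes,
			  unsigned int bytesPerPixel)
{
	return static_cast<float>(widthPixels * bytesPerPixel) / strideBytes;
}
```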