utils: raspberrypi: ctt: Adapt tuning tool for both VC4 and PiSP

The old ctt.py and alsc_only.py scripts are removed.

Instead of ctt.py use ctt_vc4.py or ctt_pisp.py, depending on your
target platform.

Instead of alsc_only.py use alsc_vc4.py or alsc_pisp.py, again
according to your platform.
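
For example, a full PiSP tuning run that previously used ctt.py becomes
'ctt_pisp.py -i <image_dir> -o tuning.json' (arguments illustrative), and
a lens-shading-only run becomes 'alsc_pisp.py' with the same arguments.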

Signed-off-by: David Plowman <david.plowman@raspberrypi.com>
Reviewed-by: Naushir Patuck <naush@raspberrypi.com>
Tested-by: Naushir Patuck <naush@raspberrypi.com>
Acked-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
Signed-off-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
Author:    David Plowman <david.plowman@raspberrypi.com>
Date:      2024-06-06 11:15:07 +01:00
Committer: Kieran Bingham
Commit:    d13542c28f (parent 634bc7838f)
9 changed files with 511 additions and 208 deletions

utils/raspberrypi/ctt/alsc_pisp.py (new file, 37 lines)

@@ -0,0 +1,37 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2022, Raspberry Pi (Trading) Limited
#
# alsc_only.py - alsc tuning tool
import sys
from ctt_pisp import json_template, grid_size, target
from ctt_run import run_ctt
from ctt_tools import parse_input
if __name__ == '__main__':
"""
initialise calibration
"""
if len(sys.argv) == 1:
print("""
PiSP Lens Shading Camera Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
'-o' : Name of output json file.
Optional Arguments:
'-c' : Config file for the CTT. If not passed, default parameters used.
'-l' : Name of output log file. If not passed, 'ctt_log.txt' used.
""")
quit(0)
else:
"""
parse input arguments
"""
json_output, directory, config, log_output = parse_input()
run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=True)
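
The wrapper is intentionally thin: all platform knowledge lives in ctt_pisp.py (and its VC4 counterpart below), which export json_template, grid_size and target. A sketch of what an equivalent lens-shading wrapper for some other platform would look like, where ctt_newplatform is a hypothetical module name used only for illustration:

#!/usr/bin/env python3
import sys
# hypothetical platform module; the real ones are ctt_pisp and ctt_vc4
from ctt_newplatform import json_template, grid_size, target
from ctt_run import run_ctt
from ctt_tools import parse_input

if __name__ == '__main__':
    if len(sys.argv) == 1:
        print("Usage: -i <image directory> -o <output json> [-c config] [-l log]")
        quit(0)
    json_output, directory, config, log_output = parse_input()
    run_ctt(json_output, directory, config, log_output,
            json_template, grid_size, target, alsc_only=True)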

utils/raspberrypi/ctt/alsc_vc4.py (renamed from alsc_only.py)

@@ -6,8 +6,11 @@
#
# alsc tuning tool
from ctt import *
import sys
from ctt_vc4 import json_template, grid_size, target
from ctt_run import run_ctt
from ctt_tools import parse_input
if __name__ == '__main__':
"""
@@ -15,7 +18,7 @@ if __name__ == '__main__':
"""
if len(sys.argv) == 1:
print("""
Pisp Camera Tuning Tool version 1.0
VC4 Lens Shading Camera Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
@@ -31,4 +34,4 @@ if __name__ == '__main__':
parse input arguments
"""
json_output, directory, config, log_output = parse_input()
run_ctt(json_output, directory, config, log_output, alsc_only=True)
run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=True)

utils/raspberrypi/ctt/ctt_alsc.py

@@ -13,8 +13,9 @@ from mpl_toolkits.mplot3d import Axes3D
"""
perform alsc calibration on a set of images
"""
def alsc_all(Cam, do_alsc_colour, plot):
def alsc_all(Cam, do_alsc_colour, plot, grid_size=(16, 12)):
imgs_alsc = Cam.imgs_alsc
grid_w, grid_h = grid_size
"""
create list of colour temperatures and associated calibration tables
"""
@@ -23,7 +24,7 @@ def alsc_all(Cam, do_alsc_colour, plot):
list_cb = []
list_cg = []
for Img in imgs_alsc:
col, cr, cb, cg, size = alsc(Cam, Img, do_alsc_colour, plot)
col, cr, cb, cg, size = alsc(Cam, Img, do_alsc_colour, plot, grid_size=grid_size)
list_col.append(col)
list_cr.append(cr)
list_cb.append(cb)
@@ -68,11 +69,12 @@ def alsc_all(Cam, do_alsc_colour, plot):
t_b = np.where((100*t_b) % 1 >= 0.95, t_b-0.001, t_b)
t_r = np.round(t_r, 3)
t_b = np.round(t_b, 3)
r_corners = (t_r[0], t_r[15], t_r[-1], t_r[-16])
b_corners = (t_b[0], t_b[15], t_b[-1], t_b[-16])
r_cen = t_r[5*16+7]+t_r[5*16+8]+t_r[6*16+7]+t_r[6*16+8]
r_corners = (t_r[0], t_r[grid_w - 1], t_r[-1], t_r[-grid_w])
b_corners = (t_b[0], t_b[grid_w - 1], t_b[-1], t_b[-grid_w])
middle_pos = (grid_h // 2 - 1) * grid_w + grid_w // 2 - 1
r_cen = t_r[middle_pos]+t_r[middle_pos + 1]+t_r[middle_pos + grid_w]+t_r[middle_pos + grid_w + 1]
r_cen = round(r_cen/4, 3)
b_cen = t_b[5*16+7]+t_b[5*16+8]+t_b[6*16+7]+t_b[6*16+8]
b_cen = t_b[middle_pos]+t_b[middle_pos + 1]+t_b[middle_pos + grid_w]+t_b[middle_pos + grid_w + 1]
b_cen = round(b_cen/4, 3)
Cam.log += '\nRed table corners: {}'.format(r_corners)
Cam.log += '\nRed table centre: {}'.format(r_cen)
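
To make the generalised indexing concrete: on the default 16x12 VC4 grid the corner samples are cells 0, 15, 191 and 176, and the centre estimate averages the central 2x2 cells, exactly as the old hard-coded indices did. A small illustration of the arithmetic:

grid_w, grid_h = 16, 12
corners = (0, grid_w - 1, grid_w * grid_h - 1, grid_w * (grid_h - 1))
middle_pos = (grid_h // 2 - 1) * grid_w + grid_w // 2 - 1   # row 5, column 7
centre_cells = (middle_pos, middle_pos + 1,
                middle_pos + grid_w, middle_pos + grid_w + 1)
print(corners)        # -> (0, 15, 191, 176)
print(centre_cells)   # -> (87, 88, 103, 104), i.e. the old 5*16+7 ... 6*16+8
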
@@ -116,8 +118,9 @@ def alsc_all(Cam, do_alsc_colour, plot):
"""
calculate g/r and g/b for 32x32 points arranged in a grid for a single image
"""
def alsc(Cam, Img, do_alsc_colour, plot=False):
def alsc(Cam, Img, do_alsc_colour, plot=False, grid_size=(16, 12)):
Cam.log += '\nProcessing image: ' + Img.name
grid_w, grid_h = grid_size
"""
get channel in correct order
"""
@@ -128,24 +131,24 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
where w is a multiple of 32.
"""
w, h = Img.w/2, Img.h/2
dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h))
"""
average the green channels into one
"""
av_ch_g = np.mean((channels[1:3]), axis=0)
if do_alsc_colour:
"""
obtain 16x12 grid of intensities for each channel and subtract black level
obtain grid_w x grid_h grid of intensities for each channel and subtract black level
"""
g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16
r = get_16x12_grid(channels[0], dx, dy) - Img.blacklevel_16
b = get_16x12_grid(channels[3], dx, dy) - Img.blacklevel_16
g = get_grid(av_ch_g, dx, dy, grid_size) - Img.blacklevel_16
r = get_grid(channels[0], dx, dy, grid_size) - Img.blacklevel_16
b = get_grid(channels[3], dx, dy, grid_size) - Img.blacklevel_16
"""
calculate ratios as 32 bit in order to be supported by medianBlur function
"""
cr = np.reshape(g/r, (12, 16)).astype('float32')
cb = np.reshape(g/b, (12, 16)).astype('float32')
cg = np.reshape(1/g, (12, 16)).astype('float32')
cr = np.reshape(g/r, (grid_h, grid_w)).astype('float32')
cb = np.reshape(g/b, (grid_h, grid_w)).astype('float32')
cg = np.reshape(1/g, (grid_h, grid_w)).astype('float32')
"""
median blur to remove peaks and save as float 64
"""
@@ -164,7 +167,7 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
"""
note Y is plotted as -Y so plot has same axes as image
"""
X, Y = np.meshgrid(range(16), range(12))
X, Y = np.meshgrid(range(grid_w), range(grid_h))
ha.plot_surface(X, -Y, cr, cmap=cm.coolwarm, linewidth=0)
ha.set_title('ALSC Plot\nImg: {}\n\ncr'.format(Img.str))
hb = hf.add_subplot(312, projection='3d')
@@ -182,15 +185,15 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
"""
only perform calculations for luminance shading
"""
g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16
cg = np.reshape(1/g, (12, 16)).astype('float32')
g = get_grid(av_ch_g, dx, dy, grid_size) - Img.blacklevel_16
cg = np.reshape(1/g, (grid_h, grid_w)).astype('float32')
cg = cv2.medianBlur(cg, 3).astype('float64')
cg = cg/np.min(cg)
if plot:
hf = plt.figure(figsize=(8, 8))
ha = hf.add_subplot(1, 1, 1, projection='3d')
X, Y = np.meshgrid(range(16), range(12))
X, Y = np.meshgrid(range(grid_w), range(grid_h))
ha.plot_surface(X, -Y, cg, cmap=cm.coolwarm, linewidth=0)
ha.set_title('ALSC Plot (Luminance only!)\nImg: {}\n\ncg'.format(Img.str))
plt.show()
@@ -199,21 +202,22 @@ def alsc(Cam, Img, do_alsc_colour, plot=False):
"""
Compresses channel down to a 16x12 grid
Compresses channel down to a grid of the requested size
"""
def get_16x12_grid(chan, dx, dy):
def get_grid(chan, dx, dy, grid_size):
grid_w, grid_h = grid_size
grid = []
"""
since the rightmost column and bottom row will not necessarily have rectangles of
dimension dx x dy, the last iteration has to be handled separately.
"""
for i in range(11):
for j in range(15):
for i in range(grid_h - 1):
for j in range(grid_w - 1):
grid.append(np.mean(chan[dy*i:dy*(1+i), dx*j:dx*(1+j)]))
grid.append(np.mean(chan[dy*i:dy*(1+i), 15*dx:]))
for j in range(15):
grid.append(np.mean(chan[11*dy:, dx*j:dx*(1+j)]))
grid.append(np.mean(chan[11*dy:, 15*dx:]))
grid.append(np.mean(chan[dy*i:dy*(1+i), (grid_w - 1)*dx:]))
for j in range(grid_w - 1):
grid.append(np.mean(chan[(grid_h - 1)*dy:, dx*j:dx*(1+j)]))
grid.append(np.mean(chan[(grid_h - 1)*dy:, (grid_w - 1)*dx:]))
"""
return as np.array, ready for further manipulation
"""
@@ -223,7 +227,7 @@ def get_16x12_grid(chan, dx, dy):
"""
obtains sigmas for red and blue, effectively a measure of the 'error'
"""
def get_sigma(Cam, cal_cr_list, cal_cb_list):
def get_sigma(Cam, cal_cr_list, cal_cb_list, grid_size):
Cam.log += '\nCalculating sigmas'
"""
provided colour alsc tables were generated for two different colour
@@ -241,8 +245,8 @@ def get_sigma(Cam, cal_cr_list, cal_cb_list):
sigma_rs = []
sigma_bs = []
for i in range(len(cts)-1):
sigma_rs.append(calc_sigma(cal_cr_list[i]['table'], cal_cr_list[i+1]['table']))
sigma_bs.append(calc_sigma(cal_cb_list[i]['table'], cal_cb_list[i+1]['table']))
sigma_rs.append(calc_sigma(cal_cr_list[i]['table'], cal_cr_list[i+1]['table'], grid_size))
sigma_bs.append(calc_sigma(cal_cb_list[i]['table'], cal_cb_list[i+1]['table'], grid_size))
Cam.log += '\nColour temperature interval {} - {} K'.format(cts[i], cts[i+1])
Cam.log += '\nSigma red: {}'.format(sigma_rs[-1])
Cam.log += '\nSigma blue: {}'.format(sigma_bs[-1])
@@ -263,12 +267,13 @@ def get_sigma(Cam, cal_cr_list, cal_cb_list):
"""
calculate sigma from two adjacent gain tables
"""
def calc_sigma(g1, g2):
def calc_sigma(g1, g2, grid_size):
grid_w, grid_h = grid_size
"""
reshape into 16x12 matrix
"""
g1 = np.reshape(g1, (12, 16))
g2 = np.reshape(g2, (12, 16))
g1 = np.reshape(g1, (grid_h, grid_w))
g2 = np.reshape(g2, (grid_h, grid_w))
"""
apply gains to gain table
"""
@@ -280,8 +285,8 @@ def calc_sigma(g1, g2):
neighbours, then append to list
"""
diffs = []
for i in range(10):
for j in range(14):
for i in range(grid_h - 2):
for j in range(grid_w - 2):
"""
note indexing is incremented by 1 since all patches on borders are
not counted
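
The averaging helper generalised above is small enough to show in full. A self-contained sketch of get_grid() as it stands after this change (numpy only; the last row and column absorb whatever pixels remain once grid_w - 1 and grid_h - 1 full cells have been taken):

import numpy as np

def get_grid(chan, dx, dy, grid_size):
    # average a 2-D channel into grid_w x grid_h cell means, row-major
    grid_w, grid_h = grid_size
    grid = []
    for i in range(grid_h - 1):
        for j in range(grid_w - 1):
            grid.append(np.mean(chan[dy*i:dy*(i+1), dx*j:dx*(j+1)]))
        grid.append(np.mean(chan[dy*i:dy*(i+1), (grid_w - 1)*dx:]))   # last column
    for j in range(grid_w - 1):
        grid.append(np.mean(chan[(grid_h - 1)*dy:, dx*j:dx*(j+1)]))   # last row
    grid.append(np.mean(chan[(grid_h - 1)*dy:, (grid_w - 1)*dx:]))    # bottom-right cell
    return np.array(grid)

# e.g. a 64x48 test array on the 16x12 VC4 grid yields 192 cell means
print(get_grid(np.arange(48 * 64, dtype=float).reshape(48, 64), 4, 4, (16, 12)).shape)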

utils/raspberrypi/ctt/ctt_awb.py

@@ -13,7 +13,7 @@ from scipy.optimize import fmin
"""
obtain piecewise linear approximation for colour curve
"""
def awb(Cam, cal_cr_list, cal_cb_list, plot):
def awb(Cam, cal_cr_list, cal_cb_list, plot, grid_size):
imgs = Cam.imgs
"""
condense alsc calibration tables into one dictionary
@@ -43,7 +43,7 @@ def awb(Cam, cal_cr_list, cal_cb_list, plot):
Note: if alsc is disabled then colour_cals will be set to None and the
function will just return the greyscale patches
"""
r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals)
r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals, grid_size=grid_size)
"""
calculate ratio of r, b to g
"""
@@ -293,12 +293,13 @@ def awb(Cam, cal_cr_list, cal_cb_list, plot):
"""
obtain greyscale patches and perform alsc colour correction
"""
def get_alsc_patches(Img, colour_cals, grey=True):
def get_alsc_patches(Img, colour_cals, grey=True, grid_size=(16, 12)):
"""
get patch centre coordinates, image colour and the actual
patches for each channel, remembering to subtract blacklevel
If grey then only greyscale patches considered
"""
grid_w, grid_h = grid_size
if grey:
cen_coords = Img.cen_coords[3::4]
col = Img.col
@@ -345,12 +346,12 @@ def get_alsc_patches(Img, colour_cals, grey=True):
bef_tabs = np.array(colour_cals[bef])
aft_tabs = np.array(colour_cals[aft])
col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
col_tabs = np.reshape(col_tabs, (2, 12, 16))
col_tabs = np.reshape(col_tabs, (2, grid_h, grid_w))
"""
calculate dx, dy used to calculate alsc table
"""
w, h = Img.w/2, Img.h/2
dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h))
"""
make list of pairs of gains for each patch by selecting the correct value
in alsc colour calibration table
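
The dx/dy expression above is ceiling division spelled with Python's floor-dividing // operator, so the grid always covers the half-resolution image even when its dimensions do not divide evenly. A quick sketch (the helper name and image dimensions are illustrative):

import math

def cell_size(w, h, grid_size):
    grid_w, grid_h = grid_size
    # -(-a // b) == ceil(a / b) for positive integers a and b
    dx = int(-(-(w - 1) // grid_w))
    dy = int(-(-(h - 1) // grid_h))
    assert (dx, dy) == (math.ceil((w - 1) / grid_w), math.ceil((h - 1) / grid_h))
    return dx, dy

# a 2028x1520 half-resolution image on the default 16x12 VC4 grid
print(cell_size(2028, 1520, (16, 12)))   # -> (127, 127)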

utils/raspberrypi/ctt/ctt_ccm.py

@@ -56,7 +56,7 @@ Finds colour correction matrices for list of images
"""
def ccm(Cam, cal_cr_list, cal_cb_list):
def ccm(Cam, cal_cr_list, cal_cb_list, grid_size):
global matrix_selection_types, typenum
imgs = Cam.imgs
"""
@@ -133,9 +133,7 @@ def ccm(Cam, cal_cr_list, cal_cb_list):
Note: if alsc is disabled then colour_cals will be set to None and
the function will simply return the macbeth patches
"""
r, b, g = get_alsc_patches(Img, colour_cals, grey=False)
# 256 values for each patch of sRGB values
r, b, g = get_alsc_patches(Img, colour_cals, grey=False, grid_size=grid_size)
"""
do awb
Note: awb is done by measuring the macbeth chart in the image, rather

utils/raspberrypi/ctt/ctt_pisp.py (new executable file, 233 lines)

@@ -0,0 +1,233 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# ctt_pisp.py - camera tuning tool for PiSP platforms
import os
import sys
from ctt_run import run_ctt
from ctt_tools import parse_input
json_template = {
"rpi.black_level": {
"black_level": 4096
},
"rpi.lux": {
"reference_shutter_speed": 10000,
"reference_gain": 1,
"reference_aperture": 1.0
},
"rpi.dpc": {
"strength": 1
},
"rpi.noise": {
},
"rpi.geq": {
},
"rpi.denoise":
{
"sdn":
{
"deviation": 1.6,
"strength": 0.5,
"deviation2": 3.2,
"deviation_no_tdn": 3.2,
"strength_no_tdn": 0.75
},
"cdn":
{
"deviation": 200,
"strength": 0.3
},
"tdn":
{
"deviation": 0.8,
"threshold": 0.05
}
},
"rpi.awb": {
"priors": [
{"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
{"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
{"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
],
"modes": {
"auto": {"lo": 2500, "hi": 7700},
"incandescent": {"lo": 2500, "hi": 3000},
"tungsten": {"lo": 3000, "hi": 3500},
"fluorescent": {"lo": 4000, "hi": 4700},
"indoor": {"lo": 3000, "hi": 5000},
"daylight": {"lo": 5500, "hi": 6500},
"cloudy": {"lo": 7000, "hi": 8000}
},
"bayes": 1
},
"rpi.agc": {
"metering_modes": {
"centre-weighted": {
"weights": [
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
]
},
"spot": {
"weights": [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
},
"matrix": {
"weights": [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
}
},
"exposure_modes": {
"normal": {
"shutter": [100, 10000, 30000, 60000, 66666],
"gain": [1.0, 1.5, 2.0, 4.0, 8.0]
},
"short": {
"shutter": [100, 5000, 10000, 20000, 60000],
"gain": [1.0, 1.5, 2.0, 4.0, 8.0]
},
"long":
{
"shutter": [ 100, 10000, 30000, 60000, 90000, 120000 ],
"gain": [ 1.0, 1.5, 2.0, 4.0, 8.0, 12.0 ]
}
},
"constraint_modes": {
"normal": [
{"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]}
],
"highlight": [
{"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]},
{"bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.8, 1000, 0.8]}
]
},
"y_target": [0, 0.16, 1000, 0.165, 10000, 0.17]
},
"rpi.alsc": {
'omega': 1.3,
'n_iter': 100,
'luminance_strength': 0.8,
},
"rpi.contrast": {
"ce_enable": 1,
"gamma_curve": [
0, 0,
1024, 5040,
2048, 9338,
3072, 12356,
4096, 15312,
5120, 18051,
6144, 20790,
7168, 23193,
8192, 25744,
9216, 27942,
10240, 30035,
11264, 32005,
12288, 33975,
13312, 35815,
14336, 37600,
15360, 39168,
16384, 40642,
18432, 43379,
20480, 45749,
22528, 47753,
24576, 49621,
26624, 51253,
28672, 52698,
30720, 53796,
32768, 54876,
36864, 57012,
40960, 58656,
45056, 59954,
49152, 61183,
53248, 62355,
57344, 63419,
61440, 64476,
65535, 65535
]
},
"rpi.ccm": {
},
"rpi.sharpen": {
"threshold": 0.25,
"limit": 1.0,
"strength": 1.0
}
}
grid_size = (32, 32)
target = 'pisp'
if __name__ == '__main__':
"""
initialise calibration
"""
if len(sys.argv) == 1:
print("""
PiSP Camera Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
'-o' : Name of output json file.
Optional Arguments:
'-c' : Config file for the CTT. If not passed, default parameters used.
'-l' : Name of output log file. If not passed, 'ctt_log.txt' used.
""")
quit(0)
else:
"""
parse input arguments
"""
json_output, directory, config, log_output = parse_input()
run_ctt(json_output, directory, config, log_output, json_template, grid_size, target)

utils/raspberrypi/ctt/ctt_pretty_print_json.py

@@ -19,6 +19,7 @@ class Encoder(json.JSONEncoder):
self.indentation_level = 0
self.hard_break = 120
self.custom_elems = {
'weights': 15,
'table': 16,
'luminance_lut': 16,
'ct_curve': 3,
@@ -87,7 +88,7 @@ class Encoder(json.JSONEncoder):
return self.encode(o)
def pretty_print(in_json: dict) -> str:
def pretty_print(in_json: dict, custom_elems={}) -> str:
if 'version' not in in_json or \
'target' not in in_json or \
@@ -95,7 +96,9 @@ def pretty_print(in_json: dict) -> str:
in_json['version'] < 2.0:
raise RuntimeError('Incompatible JSON dictionary has been provided')
return json.dumps(in_json, cls=Encoder, indent=4, sort_keys=False)
encoder = Encoder(indent=4, sort_keys=False)
encoder.custom_elems |= custom_elems
return encoder.encode(in_json) #json.dumps(in_json, cls=Encoder, indent=4, sort_keys=False)
if __name__ == "__main__":
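
The custom_elems merge relies on the in-place dict union operator, so per-platform row widths simply override the encoder defaults (note that |= on dicts requires Python 3.9 or newer). A minimal illustration using the default widths shown above:

defaults = {'weights': 15, 'table': 16, 'luminance_lut': 16, 'ct_curve': 3}
defaults |= {'table': 32, 'luminance_lut': 32}    # e.g. PiSP's 32-wide ALSC tables
print(defaults['table'], defaults['ct_curve'])    # -> 32 3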

utils/raspberrypi/ctt/ctt_run.py (renamed from ctt.py)

@@ -67,7 +67,7 @@ Camera object that is the backbone of the tuning tool.
Input is the desired path of the output json.
"""
class Camera:
def __init__(self, jfile):
def __init__(self, jfile, json):
self.path = os.path.dirname(os.path.expanduser(__file__)) + '/'
if self.path == '/':
self.path = ''
@@ -79,127 +79,15 @@ class Camera:
"""
initial json dict populated by uncalibrated values
"""
self.json = {
"rpi.black_level": {
"black_level": 4096
},
"rpi.dpc": {
},
"rpi.lux": {
"reference_shutter_speed": 10000,
"reference_gain": 1,
"reference_aperture": 1.0
},
"rpi.noise": {
},
"rpi.geq": {
},
"rpi.sdn": {
},
"rpi.awb": {
"priors": [
{"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
{"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
{"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
],
"modes": {
"auto": {"lo": 2500, "hi": 8000},
"incandescent": {"lo": 2500, "hi": 3000},
"tungsten": {"lo": 3000, "hi": 3500},
"fluorescent": {"lo": 4000, "hi": 4700},
"indoor": {"lo": 3000, "hi": 5000},
"daylight": {"lo": 5500, "hi": 6500},
"cloudy": {"lo": 7000, "hi": 8600}
},
"bayes": 1
},
"rpi.agc": {
"metering_modes": {
"centre-weighted": {
"weights": [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0]
},
"spot": {
"weights": [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
},
"matrix": {
"weights": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
}
},
"exposure_modes": {
"normal": {
"shutter": [100, 10000, 30000, 60000, 120000],
"gain": [1.0, 2.0, 4.0, 6.0, 6.0]
},
"short": {
"shutter": [100, 5000, 10000, 20000, 120000],
"gain": [1.0, 2.0, 4.0, 6.0, 6.0]
}
},
"constraint_modes": {
"normal": [
{"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]}
],
"highlight": [
{"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]},
{"bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.8, 1000, 0.8]}
]
},
"y_target": [0, 0.16, 1000, 0.165, 10000, 0.17]
},
"rpi.alsc": {
'omega': 1.3,
'n_iter': 100,
'luminance_strength': 0.7,
},
"rpi.contrast": {
"ce_enable": 1,
"gamma_curve": [
0, 0,
1024, 5040,
2048, 9338,
3072, 12356,
4096, 15312,
5120, 18051,
6144, 20790,
7168, 23193,
8192, 25744,
9216, 27942,
10240, 30035,
11264, 32005,
12288, 33975,
13312, 35815,
14336, 37600,
15360, 39168,
16384, 40642,
18432, 43379,
20480, 45749,
22528, 47753,
24576, 49621,
26624, 51253,
28672, 52698,
30720, 53796,
32768, 54876,
36864, 57012,
40960, 58656,
45056, 59954,
49152, 61183,
53248, 62355,
57344, 63419,
61440, 64476,
65535, 65535
]
},
"rpi.ccm": {
},
"rpi.sharpen": {
}
}
self.json = json
"""
Perform colour correction calibrations by comparing macbeth patch colours
to standard macbeth chart colours.
"""
def ccm_cal(self, do_alsc_colour):
def ccm_cal(self, do_alsc_colour, grid_size):
if 'rpi.ccm' in self.disable:
return 1
print('\nStarting CCM calibration')
@@ -245,7 +133,7 @@ class Camera:
Do CCM calibration
"""
try:
ccms = ccm(self, cal_cr_list, cal_cb_list)
ccms = ccm(self, cal_cr_list, cal_cb_list, grid_size)
except ArithmeticError:
print('ERROR: Matrix is singular!\nTake new pictures and try again...')
self.log += '\nERROR: Singular matrix encountered during fit!'
@@ -263,7 +151,7 @@ class Camera:
various colour temperatures, as well as providing a maximum 'wiggle room'
distance from this curve (transverse_neg/pos).
"""
def awb_cal(self, greyworld, do_alsc_colour):
def awb_cal(self, greyworld, do_alsc_colour, grid_size):
if 'rpi.awb' in self.disable:
return 1
print('\nStarting AWB calibration')
@@ -306,7 +194,7 @@ class Camera:
call calibration function
"""
plot = "rpi.awb" in self.plot
awb_out = awb(self, cal_cr_list, cal_cb_list, plot)
awb_out = awb(self, cal_cr_list, cal_cb_list, plot, grid_size)
ct_curve, transverse_neg, transverse_pos = awb_out
"""
write output to json
@@ -324,7 +212,7 @@ class Camera:
colour channel separately, and then partially corrects for vignetting.
The extent of the correction depends on the 'luminance_strength' parameter.
"""
def alsc_cal(self, luminance_strength, do_alsc_colour):
def alsc_cal(self, luminance_strength, do_alsc_colour, grid_size):
if 'rpi.alsc' in self.disable:
return 1
print('\nStarting ALSC calibration')
@@ -347,7 +235,7 @@ class Camera:
call calibration function
"""
plot = "rpi.alsc" in self.plot
alsc_out = alsc_all(self, do_alsc_colour, plot)
alsc_out = alsc_all(self, do_alsc_colour, plot, grid_size)
cal_cr_list, cal_cb_list, luminance_lut, av_corn = alsc_out
"""
write output to json and finish if not do_alsc_colour
@@ -393,7 +281,7 @@ class Camera:
"""
obtain worst-case scenario residual sigmas
"""
sigma_r, sigma_b = get_sigma(self, cal_cr_list, cal_cb_list)
sigma_r, sigma_b = get_sigma(self, cal_cr_list, cal_cb_list, grid_size)
"""
write output to json
"""
@@ -509,19 +397,20 @@ class Camera:
"""
writes the json dictionary to the raw json file then make pretty
"""
def write_json(self):
def write_json(self, version=2.0, target='bcm2835', grid_size=(16, 12)):
"""
Write json dictionary to file using our version 2 format
"""
out_json = {
"version": 2.0,
'target': 'bcm2835',
"version": version,
'target': target if target != 'vc4' else 'bcm2835',
"algorithms": [{name: data} for name, data in self.json.items()],
}
with open(self.jf, 'w') as f:
f.write(pretty_print(out_json))
f.write(pretty_print(out_json,
custom_elems={'table': grid_size[0], 'luminance_lut': grid_size[0]}))
"""
add a new section to the log file
@@ -712,7 +601,7 @@ class Camera:
return 0
def run_ctt(json_output, directory, config, log_output, alsc_only=False):
def run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=False):
"""
check input files are jsons
"""
@@ -748,7 +637,7 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
greyworld = get_config(awb_d, "greyworld", 0, 'bool')
alsc_d = get_config(configs, "alsc", {}, 'dict')
do_alsc_colour = get_config(alsc_d, "do_alsc_colour", 1, 'bool')
luminance_strength = get_config(alsc_d, "luminance_strength", 0.5, 'num')
luminance_strength = get_config(alsc_d, "luminance_strength", 0.8, 'num')
blacklevel = get_config(configs, "blacklevel", -1, 'num')
macbeth_d = get_config(configs, "macbeth", {}, 'dict')
mac_small = get_config(macbeth_d, "small", 0, 'bool')
@@ -772,7 +661,7 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
initialise tuning tool and load images
"""
try:
Cam = Camera(json_output)
Cam = Camera(json_output, json=json_template)
Cam.log_user_input(json_output, directory, config, log_output)
if alsc_only:
disable = set(Cam.json.keys()).symmetric_difference({"rpi.alsc"})
@@ -794,14 +683,16 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
Cam.json['rpi.black_level']['black_level'] = Cam.blacklevel_16
Cam.json_remove(disable)
print('\nSTARTING CALIBRATIONS')
Cam.alsc_cal(luminance_strength, do_alsc_colour)
Cam.alsc_cal(luminance_strength, do_alsc_colour, grid_size)
Cam.geq_cal()
Cam.lux_cal()
Cam.noise_cal()
Cam.awb_cal(greyworld, do_alsc_colour)
Cam.ccm_cal(do_alsc_colour)
Cam.cac_cal(do_alsc_colour)
Cam.awb_cal(greyworld, do_alsc_colour, grid_size)
Cam.ccm_cal(do_alsc_colour, grid_size)
print('\nFINISHED CALIBRATIONS')
Cam.write_json()
Cam.write_json(target=target, grid_size=grid_size)
Cam.write_log(log_output)
print('\nCalibrations written to: '+json_output)
if log_output is None:
@@ -810,28 +701,3 @@ def run_ctt(json_output, directory, config, log_output, alsc_only=False):
pass
else:
Cam.write_log(log_output)
if __name__ == '__main__':
"""
initialise calibration
"""
if len(sys.argv) == 1:
print("""
Pisp Camera Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
'-o' : Name of output json file.
Optional Arguments:
'-c' : Config file for the CTT. If not passed, default parameters used.
'-l' : Name of output log file. If not passed, 'ctt_log.txt' used.
""")
quit(0)
else:
"""
parse input arguments
"""
json_output, directory, config, log_output = parse_input()
run_ctt(json_output, directory, config, log_output)
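
One detail of run_ctt() worth calling out: when alsc_only is set, every algorithm except rpi.alsc is disabled by taking the symmetric difference of the template's keys with {"rpi.alsc"}. A small illustration with a reduced key set:

algos = {'rpi.alsc', 'rpi.awb', 'rpi.ccm', 'rpi.contrast'}
disable = algos.symmetric_difference({'rpi.alsc'})
print(sorted(disable))   # -> ['rpi.awb', 'rpi.ccm', 'rpi.contrast']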

utils/raspberrypi/ctt/ctt_vc4.py (new executable file, 157 lines)

@@ -0,0 +1,157 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# ctt_vc4.py - camera tuning tool for VC4 platforms
import os
import sys
from ctt_run import run_ctt
from ctt_tools import parse_input
json_template = {
"rpi.black_level": {
"black_level": 4096
},
"rpi.dpc": {
},
"rpi.lux": {
"reference_shutter_speed": 10000,
"reference_gain": 1,
"reference_aperture": 1.0
},
"rpi.noise": {
},
"rpi.geq": {
},
"rpi.sdn": {
},
"rpi.awb": {
"priors": [
{"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
{"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
{"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
],
"modes": {
"auto": {"lo": 2500, "hi": 8000},
"incandescent": {"lo": 2500, "hi": 3000},
"tungsten": {"lo": 3000, "hi": 3500},
"fluorescent": {"lo": 4000, "hi": 4700},
"indoor": {"lo": 3000, "hi": 5000},
"daylight": {"lo": 5500, "hi": 6500},
"cloudy": {"lo": 7000, "hi": 8600}
},
"bayes": 1
},
"rpi.agc": {
"metering_modes": {
"centre-weighted": {
"weights": [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0]
},
"spot": {
"weights": [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
},
"matrix": {
"weights": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
}
},
"exposure_modes": {
"normal": {
"shutter": [100, 10000, 30000, 60000, 120000],
"gain": [1.0, 2.0, 4.0, 6.0, 6.0]
},
"short": {
"shutter": [100, 5000, 10000, 20000, 120000],
"gain": [1.0, 2.0, 4.0, 6.0, 6.0]
}
},
"constraint_modes": {
"normal": [
{"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]}
],
"highlight": [
{"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]},
{"bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.8, 1000, 0.8]}
]
},
"y_target": [0, 0.16, 1000, 0.165, 10000, 0.17]
},
"rpi.alsc": {
'omega': 1.3,
'n_iter': 100,
'luminance_strength': 0.7,
},
"rpi.contrast": {
"ce_enable": 1,
"gamma_curve": [
0, 0,
1024, 5040,
2048, 9338,
3072, 12356,
4096, 15312,
5120, 18051,
6144, 20790,
7168, 23193,
8192, 25744,
9216, 27942,
10240, 30035,
11264, 32005,
12288, 33975,
13312, 35815,
14336, 37600,
15360, 39168,
16384, 40642,
18432, 43379,
20480, 45749,
22528, 47753,
24576, 49621,
26624, 51253,
28672, 52698,
30720, 53796,
32768, 54876,
36864, 57012,
40960, 58656,
45056, 59954,
49152, 61183,
53248, 62355,
57344, 63419,
61440, 64476,
65535, 65535
]
},
"rpi.ccm": {
},
"rpi.sharpen": {
}
}
grid_size = (16, 12)
target = 'bcm2835'
if __name__ == '__main__':
"""
initialise calibration
"""
if len(sys.argv) == 1:
print("""
VC4 Camera Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
'-o' : Name of output json file.
Optional Arguments:
'-c' : Config file for the CTT. If not passed, default parameters used.
'-l' : Name of output log file. If not passed, 'ctt_log.txt' used.
""")
quit(0)
else:
"""
parse input arguments
"""
json_output, directory, config, log_output = parse_input()
run_ctt(json_output, directory, config, log_output, json_template, grid_size, target)
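
Finally, the two module-level constants are what distinguish the platforms in the written tuning file: target becomes the JSON "target" field (with 'vc4' mapped to the historical 'bcm2835' name) and grid_size[0] sets how many values pretty_print() places on each ALSC table row. A sketch of the write path under those assumptions, using the names defined in this file:

from ctt_pretty_print_json import pretty_print

out_json = {
    "version": 2.0,
    "target": target if target != 'vc4' else 'bcm2835',
    "algorithms": [{name: data} for name, data in json_template.items()],
}
text = pretty_print(out_json,
                    custom_elems={'table': grid_size[0], 'luminance_lut': grid_size[0]})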