utils: raspberrypi: ctt: Fix pycodestyle E231

E231 missing whitespace after ','
E231 missing whitespace after ':'

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Kieran Bingham <kieran.bingham@ideasonboard.com>
Reviewed-by: David Plowman <david.plowman@raspberrypi.com>
parent 7a653369cb
commit 93a133fb17

11 changed files with 493 additions and 493 deletions
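For context, pycodestyle's E231 check reports a missing space after a comma, and after a colon in contexts such as dict literals. A minimal, constructed illustration of the rule (the `point` dict below is a made-up example, not a line from this diff):

    # pycodestyle reports "E231 missing whitespace after ','" and "E231 missing whitespace after ':'"
    point = {'x':1,'y':2}

    # compliant spacing, matching the style applied throughout this commit
    point = {'x': 1, 'y': 2}

The same one-character insertions are applied mechanically across the eleven changed files; the diff below shows them file by file.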
@@ -41,15 +41,15 @@ def get_col_lux(string):
 """
 Extract colour and lux values from filename
 """
-col = re.search('([0-9]+)[kK](\.(jpg|jpeg|brcm|dng)|_.*\.(jpg|jpeg|brcm|dng))$',string)
-lux = re.search('([0-9]+)[lL](\.(jpg|jpeg|brcm|dng)|_.*\.(jpg|jpeg|brcm|dng))$',string)
+col = re.search('([0-9]+)[kK](\.(jpg|jpeg|brcm|dng)|_.*\.(jpg|jpeg|brcm|dng))$', string)
+lux = re.search('([0-9]+)[lL](\.(jpg|jpeg|brcm|dng)|_.*\.(jpg|jpeg|brcm|dng))$', string)
 try:
 col = col.group(1)
 except AttributeError:
 """
 Catch error if images labelled incorrectly and pass reasonable defaults
 """
-return None,None
+return None, None
 try:
 lux = lux.group(1)
 except AttributeError:
@@ -57,15 +57,15 @@ def get_col_lux(string):
 Catch error if images labelled incorrectly and pass reasonable defaults
 Still returns colour if that has been found.
 """
-return col,None
-return int( col ),int( lux )
+return col, None
+return int( col ), int( lux )

 """
 Camera object that is the backbone of the tuning tool.
 Input is the desired path of the output json.
 """
 class Camera:
-def __init__(self,jfile):
+def __init__(self, jfile):
 self.path = os.path.dirname(os.path.expanduser(__file__)) + '/'
 if self.path == '/':
 self.path = ''
@@ -96,9 +96,9 @@ class Camera:
 },
 "rpi.awb": {
 "priors" : [
-{"lux": 0,"prior":[ 2000, 1.0, 3000, 0.0, 13000, 0.0]},
-{"lux": 800,"prior":[ 2000, 0.0, 6000, 2.0, 13000, 2.0]},
-{"lux": 1500,"prior":[ 2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
+{"lux": 0, "prior": [ 2000, 1.0, 3000, 0.0, 13000, 0.0]},
+{"lux": 800, "prior": [ 2000, 0.0, 6000, 2.0, 13000, 2.0]},
+{"lux": 1500, "prior": [ 2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
 ],
 "modes" : {
 "auto" : { "lo" : 2500, "hi" : 8000 },
@@ -189,7 +189,7 @@ class Camera:
 },
 "rpi.ccm": {
 },
-"rpi.sharpen":{
+"rpi.sharpen": {
 }
 }

@@ -198,7 +198,7 @@ class Camera:
 Perform colour correction calibrations by comparing macbeth patch colours
 to standard macbeth chart colours.
 """
-def ccm_cal(self,do_alsc_colour):
+def ccm_cal(self, do_alsc_colour):
 if 'rpi.ccm' in self.disable:
 return 1
 print('\nStarting CCM calibration')
@@ -227,7 +227,7 @@ class Camera:
 cal_cb_list = self.json['rpi.alsc']['calibrations_Cb']
 self.log += '\nALSC tables found successfully'
 except KeyError:
-cal_cr_list,cal_cb_list=None,None
+cal_cr_list, cal_cb_list=None, None
 print('WARNING! No ALSC tables found for CCM!')
 print('Performing CCM calibrations without ALSC correction...')
 self.log += '\nWARNING: No ALSC tables found.\nCCM calibration '
@@ -236,7 +236,7 @@ class Camera:
 """
 case where config options result in CCM done without ALSC colour tables
 """
-cal_cr_list,cal_cb_list=None,None
+cal_cr_list, cal_cb_list=None, None
 self.log += '\nWARNING: No ALSC tables found.\nCCM calibration '
 self.log += 'performed without ALSC correction...'

@@ -244,7 +244,7 @@ class Camera:
 Do CCM calibration
 """
 try:
-ccms = ccm(self,cal_cr_list,cal_cb_list)
+ccms = ccm(self, cal_cr_list, cal_cb_list)
 except ArithmeticError:
 print('ERROR: Matrix is singular!\nTake new pictures and try again...')
 self.log += '\nERROR: Singular matrix encountered during fit!'
@@ -262,7 +262,7 @@ class Camera:
 various colour temperatures, as well as providing a maximum 'wiggle room'
 distance from this curve (transverse_neg/pos).
 """
-def awb_cal(self,greyworld,do_alsc_colour):
+def awb_cal(self, greyworld, do_alsc_colour):
 if 'rpi.awb' in self.disable:
 return 1
 print('\nStarting AWB calibration')
@@ -292,21 +292,21 @@ class Camera:
 cal_cb_list = self.json['rpi.alsc']['calibrations_Cb']
 self.log += '\nALSC tables found successfully'
 except KeyError:
-cal_cr_list,cal_cb_list=None,None
+cal_cr_list, cal_cb_list=None, None
 print('ERROR, no ALSC calibrations found for AWB')
 print('Performing AWB without ALSC tables')
 self.log += '\nWARNING: No ALSC tables found.\nAWB calibration '
 self.log += 'performed without ALSC correction...'
 else:
-cal_cr_list,cal_cb_list=None,None
+cal_cr_list, cal_cb_list=None, None
 self.log += '\nWARNING: No ALSC tables found.\nAWB calibration '
 self.log += 'performed without ALSC correction...'
 """
 call calibration function
 """
 plot = "rpi.awb" in self.plot
-awb_out = awb(self,cal_cr_list,cal_cb_list,plot)
-ct_curve,transverse_neg,transverse_pos = awb_out
+awb_out = awb(self, cal_cr_list, cal_cb_list, plot)
+ct_curve, transverse_neg, transverse_pos = awb_out
 """
 write output to json
 """
@@ -323,7 +323,7 @@ class Camera:
 colour channel seperately, and then partially corrects for vignetting.
 The extent of the correction depends on the 'luminance_strength' parameter.
 """
-def alsc_cal(self,luminance_strength,do_alsc_colour):
+def alsc_cal(self, luminance_strength, do_alsc_colour):
 if 'rpi.alsc' in self.disable:
 return 1
 print('\nStarting ALSC calibration')
@@ -346,8 +346,8 @@ class Camera:
 call calibration function
 """
 plot = "rpi.alsc" in self.plot
-alsc_out = alsc_all(self,do_alsc_colour,plot)
-cal_cr_list,cal_cb_list,luminance_lut,av_corn = alsc_out
+alsc_out = alsc_all(self, do_alsc_colour, plot)
+cal_cr_list, cal_cb_list, luminance_lut, av_corn = alsc_out
 """
 write ouput to json and finish if not do_alsc_colour
 """
@@ -392,12 +392,12 @@ class Camera:
 """
 obtain worst-case scenario residual sigmas
 """
-sigma_r,sigma_b = get_sigma(self,cal_cr_list,cal_cb_list)
+sigma_r, sigma_b = get_sigma(self, cal_cr_list, cal_cb_list)
 """
 write output to json
 """
-self.json['rpi.alsc']['sigma'] = np.round(sigma_r,5)
-self.json['rpi.alsc']['sigma_Cb'] = np.round(sigma_b,5)
+self.json['rpi.alsc']['sigma'] = np.round(sigma_r, 5)
+self.json['rpi.alsc']['sigma_Cb'] = np.round(sigma_b, 5)
 self.log += '\nCalibrated sigmas written to json file'
 print('Finished ALSC calibrations')

@@ -417,7 +417,7 @@ class Camera:
 perform calibration
 """
 plot = 'rpi.geq' in self.plot
-slope,offset = geq_fit(self,plot)
+slope, offset = geq_fit(self, plot)
 """
 write output to json
 """
@@ -441,7 +441,7 @@ class Camera:
 image with lux level closest to 1000 is chosen.
 """
 luxes = [Img.lux for Img in self.imgs]
-argmax = luxes.index(min(luxes, key=lambda l:abs(1000-l)))
+argmax = luxes.index(min(luxes, key=lambda l: abs(1000-l)))
 Img = self.imgs[argmax]
 self.log += '\nLux found closest to 1000: {} lx'.format(Img.lux)
 self.log += '\nImage used: ' + Img.name
@@ -450,7 +450,7 @@ class Camera:
 """
 do calibration
 """
-lux_out,shutter_speed,gain = lux(self,Img)
+lux_out, shutter_speed, gain = lux(self, Img)
 """
 write output to json
 """
@@ -474,28 +474,28 @@ class Camera:
 run calibration on all images and sort by slope.
 """
 plot = "rpi.noise" in self.plot
-noise_out = sorted([noise(self,Img,plot) for Img in self.imgs], key = lambda x:x[0])
+noise_out = sorted([noise(self, Img, plot) for Img in self.imgs], key = lambda x: x[0])
 self.log += '\nFinished processing images'
 """
 take the average of the interquartile
 """
 l = len(noise_out)
-noise_out = np.mean(noise_out[l//4:1+3*l//4],axis=0)
+noise_out = np.mean(noise_out[l//4:1+3*l//4], axis=0)
 self.log += '\nAverage noise profile: constant = {} '.format(int(noise_out[1]))
 self.log += 'slope = {:.3f}'.format(noise_out[0])
 """
 write to json
 """
 self.json['rpi.noise']['reference_constant'] = int(noise_out[1])
-self.json['rpi.noise']['reference_slope'] = round(noise_out[0],3)
+self.json['rpi.noise']['reference_slope'] = round(noise_out[0], 3)
 self.log += '\nNOISE calibrations written to json'
 print('Finished NOISE calibrations')

 """
 Removes json entries that are turned off
 """
-def json_remove(self,disable):
-self.log_new_sec('Disabling Options',cal=False)
+def json_remove(self, disable):
+self.log_new_sec('Disabling Options', cal=False)
 if len(self.disable) == 0:
 self.log += '\nNothing disabled!'
 return 1
@@ -512,16 +512,16 @@ class Camera:
 """
 Write json dictionary to file
 """
-jstring = json.dumps(self.json,sort_keys=False)
+jstring = json.dumps(self.json, sort_keys=False)
 """
 make it pretty :)
 """
-pretty_print_json(jstring,self.jf)
+pretty_print_json(jstring, self.jf)

 """
 add a new section to the log file
 """
-def log_new_sec(self,section,cal=True):
+def log_new_sec(self, section, cal=True):
 self.log += '\n'+self.log_separator
 self.log += section
 if cal:
@@ -531,8 +531,8 @@ class Camera:
 """
 write script arguments to log file
 """
-def log_user_input(self,json_output,directory,config,log_output):
-self.log_new_sec('User Arguments',cal=False)
+def log_user_input(self, json_output, directory, config, log_output):
+self.log_new_sec('User Arguments', cal=False)
 self.log += '\nJson file output: ' + json_output
 self.log += '\nCalibration images directory: ' + directory
 if config == None:
@@ -555,19 +555,19 @@ class Camera:
 """
 write log file
 """
-def write_log(self,filename):
+def write_log(self, filename):
 if filename == None:
 filename = 'ctt_log.txt'
 self.log += '\n' + self.log_separator
-with open(filename,'w') as logfile:
+with open(filename, 'w') as logfile:
 logfile.write(self.log)

 """
 Add all images from directory, pass into relevant list of images and
 extrace lux and temperature values.
 """
-def add_imgs(self,directory,mac_config,blacklevel=-1):
-self.log_new_sec('Image Loading',cal=False)
+def add_imgs(self, directory, mac_config, blacklevel=-1):
+self.log_new_sec('Image Loading', cal=False)
 img_suc_msg = 'Image loaded successfully!'
 print('\n\nLoading images from '+directory)
 self.log += '\nDirectory: ' + directory
@@ -588,12 +588,12 @@ class Camera:
 """
 obtain colour and lux value
 """
-col,lux = get_col_lux(filename)
+col, lux = get_col_lux(filename)
 """
 Check if image is an alsc calibration image
 """
 if 'alsc' in filename:
-Img = load_image(self,address,mac=False)
+Img = load_image(self, address, mac=False)
 self.log += '\nIdentified as an ALSC image'
 """
 check if imagae data has been successfully unpacked
@@ -633,7 +633,7 @@ class Camera:
 self.log += '\nWARNING: Error reading lux value'
 self.log += '\nImage discarded!'
 continue
-Img = load_image(self,address,mac_config)
+Img = load_image(self, address, mac_config)
 """
 check that image data has been successfuly unpacked
 """
@@ -645,7 +645,7 @@ class Camera:
 """
 if successful, append to list and continue to next image
 """
-Img.col,Img.lux = col,lux
+Img.col, Img.lux = col, lux
 Img.name = filename
 self.log += '\nColour temperature: {} K'.format(col)
 self.log += '\nLux value: {} lx'.format(lux)
@@ -683,7 +683,7 @@ class Camera:
 patterns = list(set([Img.pattern for Img in all_imgs]))
 sigbitss = list(set([Img.sigbits for Img in all_imgs]))
 blacklevels = list(set([Img.blacklevel_16 for Img in all_imgs]))
-sizes = list(set([(Img.w,Img.h) for Img in all_imgs]))
+sizes = list(set([(Img.w, Img.h) for Img in all_imgs]))

 if len(camNames)==1 and len(patterns)==1 and len(sigbitss)==1 and len(blacklevels) ==1 and len(sizes)== 1:
 self.grey = (patterns[0] == 128)
@@ -694,14 +694,14 @@ class Camera:
 self.log += '\nGreyscale camera identified'
 self.log += '\nSignificant bits: {}'.format(sigbitss[0])
 self.log += '\nBlacklevel: {}'.format(blacklevels[0])
-self.log += '\nImage size: w = {} h = {}'.format(sizes[0][0],sizes[0][1])
+self.log += '\nImage size: w = {} h = {}'.format(sizes[0][0], sizes[0][1])
 return 1
 else:
 print('\nERROR: Images from different cameras')
 self.log += '\nERROR: Images are from different cameras'
 return 0

-def run_ctt(json_output,directory,config,log_output):
+def run_ctt(json_output, directory, config, log_output):
 """
 check input files are jsons
 """
@@ -717,7 +717,7 @@ def run_ctt(json_output,directory,config,log_output):
 read configurations
 """
 try:
-with open(config,'r') as config_json:
+with open(config, 'r') as config_json:
 configs = json.load(config_json)
 except FileNotFoundError:
 configs = {}
@@ -731,18 +731,18 @@ def run_ctt(json_output,directory,config,log_output):
 """
 load configurations from config file, if not given then set default
 """
-disable = get_config(configs,"disable",[],'list')
-plot = get_config(configs,"plot",[],'list')
-awb_d = get_config(configs,"awb",{},'dict')
-greyworld = get_config(awb_d,"greyworld",0,'bool')
-alsc_d = get_config(configs,"alsc",{},'dict')
-do_alsc_colour = get_config(alsc_d,"do_alsc_colour",1,'bool')
-luminance_strength = get_config(alsc_d,"luminance_strength",0.5,'num')
-blacklevel = get_config(configs,"blacklevel",-1,'num')
-macbeth_d = get_config(configs,"macbeth",{},'dict')
-mac_small = get_config(macbeth_d,"small",0,'bool')
-mac_show = get_config(macbeth_d,"show",0,'bool')
-mac_config = (mac_small,mac_show)
+disable = get_config(configs, "disable", [], 'list')
+plot = get_config(configs, "plot", [], 'list')
+awb_d = get_config(configs, "awb", {}, 'dict')
+greyworld = get_config(awb_d, "greyworld", 0, 'bool')
+alsc_d = get_config(configs, "alsc", {}, 'dict')
+do_alsc_colour = get_config(alsc_d, "do_alsc_colour", 1, 'bool')
+luminance_strength = get_config(alsc_d, "luminance_strength", 0.5, 'num')
+blacklevel = get_config(configs, "blacklevel", -1, 'num')
+macbeth_d = get_config(configs, "macbeth", {}, 'dict')
+mac_small = get_config(macbeth_d, "small", 0, 'bool')
+mac_show = get_config(macbeth_d, "show", 0, 'bool')
+mac_config = (mac_small, mac_show)

 if blacklevel < -1 or blacklevel >= 2**16:
 print('\nInvalid blacklevel, defaulted to 64')
@@ -762,10 +762,10 @@ def run_ctt(json_output,directory,config,log_output):
 """
 try:
 Cam = Camera(json_output)
-Cam.log_user_input(json_output,directory,config,log_output)
+Cam.log_user_input(json_output, directory, config, log_output)
 Cam.disable = disable
 Cam.plot = plot
-Cam.add_imgs(directory,mac_config,blacklevel)
+Cam.add_imgs(directory, mac_config, blacklevel)
 except FileNotFoundError:
 raise ArgError('\n\nError: Input image directory not found!')

@@ -781,11 +781,11 @@ def run_ctt(json_output,directory,config,log_output):
 Cam.json['rpi.black_level']['black_level'] = Cam.blacklevel_16
 Cam.json_remove(disable)
 print('\nSTARTING CALIBRATIONS')
-Cam.alsc_cal(luminance_strength,do_alsc_colour)
+Cam.alsc_cal(luminance_strength, do_alsc_colour)
 Cam.geq_cal()
 Cam.lux_cal()
 Cam.noise_cal()
-Cam.awb_cal(greyworld,do_alsc_colour)
+Cam.awb_cal(greyworld, do_alsc_colour)
 Cam.ccm_cal(do_alsc_colour)
 print('\nFINISHED CALIBRATIONS')
 Cam.write_json()
@@ -819,5 +819,5 @@ if __name__ == '__main__':
 """
 parse input arguments
 """
-json_output,directory,config,log_output = parse_input()
-run_ctt(json_output,directory,config,log_output)
+json_output, directory, config, log_output = parse_input()
+run_ctt(json_output, directory, config, log_output)

@@ -12,7 +12,7 @@ from mpl_toolkits.mplot3d import Axes3D
 """
 preform alsc calibration on a set of images
 """
-def alsc_all(Cam,do_alsc_colour,plot):
+def alsc_all(Cam, do_alsc_colour, plot):
 imgs_alsc = Cam.imgs_alsc
 """
 create list of colour temperatures and associated calibration tables
@@ -22,16 +22,16 @@ def alsc_all(Cam,do_alsc_colour,plot):
 list_cb = []
 list_cg = []
 for Img in imgs_alsc:
-col,cr,cb,cg,size = alsc(Cam,Img,do_alsc_colour,plot)
+col, cr, cb, cg, size = alsc(Cam, Img, do_alsc_colour, plot)
 list_col.append(col)
 list_cr.append(cr)
 list_cb.append(cb)
 list_cg.append(cg)
 Cam.log += '\n'
 Cam.log += '\nFinished processing images'
-w,h,dx,dy = size
-Cam.log += '\nChannel dimensions: w = {} h = {}'.format(int(w),int(h))
-Cam.log += '\n16x12 grid rectangle size: w = {} h = {}'.format(dx,dy)
+w, h, dx, dy = size
+Cam.log += '\nChannel dimensions: w = {} h = {}'.format(int(w), int(h))
+Cam.log += '\n16x12 grid rectangle size: w = {} h = {}'.format(dx, dy)

 """
 convert to numpy array for data manipulation
@@ -56,66 +56,66 @@ def alsc_all(Cam,do_alsc_colour,plot):
 """
 indices = np.where(list_col == ct)
 ct = int(ct)
-t_r = np.mean(list_cr[indices],axis=0)
-t_b = np.mean(list_cb[indices],axis=0)
+t_r = np.mean(list_cr[indices], axis=0)
+t_b = np.mean(list_cb[indices], axis=0)
 """
 force numbers to be stored to 3dp.... :(
 """
-t_r = np.where((100*t_r)%1<=0.05, t_r+0.001,t_r)
-t_b = np.where((100*t_b)%1<=0.05, t_b+0.001,t_b)
-t_r = np.where((100*t_r)%1>=0.95, t_r-0.001,t_r)
-t_b = np.where((100*t_b)%1>=0.95, t_b-0.001,t_b)
-t_r = np.round(t_r,3)
-t_b = np.round(t_b,3)
-r_corners = (t_r[0],t_r[15],t_r[-1],t_r[-16])
-b_corners = (t_b[0],t_b[15],t_b[-1],t_b[-16])
+t_r = np.where((100*t_r)%1<=0.05, t_r+0.001, t_r)
+t_b = np.where((100*t_b)%1<=0.05, t_b+0.001, t_b)
+t_r = np.where((100*t_r)%1>=0.95, t_r-0.001, t_r)
+t_b = np.where((100*t_b)%1>=0.95, t_b-0.001, t_b)
+t_r = np.round(t_r, 3)
+t_b = np.round(t_b, 3)
+r_corners = (t_r[0], t_r[15], t_r[-1], t_r[-16])
+b_corners = (t_b[0], t_b[15], t_b[-1], t_b[-16])
 r_cen = t_r[5*16+7]+t_r[5*16+8]+t_r[6*16+7]+t_r[6*16+8]
-r_cen = round(r_cen/4,3)
+r_cen = round(r_cen/4, 3)
 b_cen = t_b[5*16+7]+t_b[5*16+8]+t_b[6*16+7]+t_b[6*16+8]
-b_cen = round(b_cen/4,3)
+b_cen = round(b_cen/4, 3)
 Cam.log += '\nRed table corners: {}'.format(r_corners)
 Cam.log += '\nRed table centre: {}'.format(r_cen)
 Cam.log += '\nBlue table corners: {}'.format(b_corners)
 Cam.log += '\nBlue table centre: {}'.format(b_cen)
 cr_dict = {
-'ct':ct,
-'table':list(t_r)
+'ct': ct,
+'table': list(t_r)
 }
 cb_dict = {
-'ct':ct,
-'table':list(t_b)
+'ct': ct,
+'table': list(t_b)
 }
 cal_cr_list.append(cr_dict)
 cal_cb_list.append(cb_dict)
 Cam.log += '\n'
 else:
-cal_cr_list,cal_cb_list = None,None
+cal_cr_list, cal_cb_list = None, None

 """
 average all values for luminance shading and return one table for all temperatures
 """
-lum_lut = np.mean(list_cg,axis=0)
-lum_lut = np.where((100*lum_lut)%1<=0.05,lum_lut+0.001,lum_lut)
-lum_lut = np.where((100*lum_lut)%1>=0.95,lum_lut-0.001,lum_lut)
-lum_lut = list(np.round(lum_lut,3))
+lum_lut = np.mean(list_cg, axis=0)
+lum_lut = np.where((100*lum_lut)%1<=0.05, lum_lut+0.001, lum_lut)
+lum_lut = np.where((100*lum_lut)%1>=0.95, lum_lut-0.001, lum_lut)
+lum_lut = list(np.round(lum_lut, 3))

 """
 calculate average corner for lsc gain calculation further on
 """
-corners = (lum_lut[0],lum_lut[15],lum_lut[-1],lum_lut[-16])
+corners = (lum_lut[0], lum_lut[15], lum_lut[-1], lum_lut[-16])
 Cam.log += '\nLuminance table corners: {}'.format(corners)
 l_cen = lum_lut[5*16+7]+lum_lut[5*16+8]+lum_lut[6*16+7]+lum_lut[6*16+8]
-l_cen = round(l_cen/4,3)
+l_cen = round(l_cen/4, 3)
 Cam.log += '\nLuminance table centre: {}'.format(l_cen)
 av_corn = np.sum(corners)/4

-return cal_cr_list,cal_cb_list,lum_lut,av_corn
+return cal_cr_list, cal_cb_list, lum_lut, av_corn


 """
 calculate g/r and g/b for 32x32 points arranged in a grid for a single image
 """
-def alsc(Cam,Img,do_alsc_colour,plot=False):
+def alsc(Cam, Img, do_alsc_colour, plot=False):
 Cam.log += '\nProcessing image: ' + Img.name
 """
 get channel in correct order
@@ -126,31 +126,31 @@ def alsc(Cam,Img,do_alsc_colour,plot=False):
 -(-(w-1)//32) is a ceiling division. w-1 is to deal robustly with the case
 where w is a multiple of 32.
 """
-w,h = Img.w/2,Img.h/2
-dx,dy = int(-(-(w-1)//16)),int(-(-(h-1)//12))
+w, h = Img.w/2, Img.h/2
+dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
 """
 average the green channels into one
 """
-av_ch_g = np.mean((channels[1:2]),axis = 0)
+av_ch_g = np.mean((channels[1:2]), axis = 0)
 if do_alsc_colour:
 """
 obtain 16x12 grid of intensities for each channel and subtract black level
 """
-g = get_16x12_grid(av_ch_g,dx,dy) - Img.blacklevel_16
-r = get_16x12_grid(channels[0],dx,dy) - Img.blacklevel_16
-b = get_16x12_grid(channels[3],dx,dy) - Img.blacklevel_16
+g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16
+r = get_16x12_grid(channels[0], dx, dy) - Img.blacklevel_16
+b = get_16x12_grid(channels[3], dx, dy) - Img.blacklevel_16
 """
 calculate ratios as 32 bit in order to be supported by medianBlur function
 """
-cr = np.reshape(g/r,(12,16)).astype('float32')
-cb = np.reshape(g/b,(12,16)).astype('float32')
-cg = np.reshape(1/g,(12,16)).astype('float32')
+cr = np.reshape(g/r, (12, 16)).astype('float32')
+cb = np.reshape(g/b, (12, 16)).astype('float32')
+cg = np.reshape(1/g, (12, 16)).astype('float32')
 """
 median blur to remove peaks and save as float 64
 """
-cr = cv2.medianBlur(cr,3).astype('float64')
-cb = cv2.medianBlur(cb,3).astype('float64')
-cg = cv2.medianBlur(cg,3).astype('float64')
+cr = cv2.medianBlur(cr, 3).astype('float64')
+cb = cv2.medianBlur(cb, 3).astype('float64')
+cg = cv2.medianBlur(cg, 3).astype('float64')
 cg = cg/np.min(cg)

 """
@@ -158,49 +158,49 @@ def alsc(Cam,Img,do_alsc_colour,plot=False):
 for sanity check
 """
 if plot:
-hf = plt.figure(figsize=(8,8))
+hf = plt.figure(figsize=(8, 8))
 ha = hf.add_subplot(311, projection='3d')
 """
 note Y is plotted as -Y so plot has same axes as image
 """
-X,Y = np.meshgrid(range(16),range(12))
-ha.plot_surface(X,-Y,cr,cmap=cm.coolwarm,linewidth=0)
+X, Y = np.meshgrid(range(16), range(12))
+ha.plot_surface(X, -Y, cr, cmap=cm.coolwarm, linewidth=0)
 ha.set_title('ALSC Plot\nImg: {}\n\ncr'.format(Img.str))
 hb = hf.add_subplot(312, projection='3d')
-hb.plot_surface(X,-Y,cb,cmap=cm.coolwarm,linewidth=0)
+hb.plot_surface(X, -Y, cb, cmap=cm.coolwarm, linewidth=0)
 hb.set_title('cb')
 hc = hf.add_subplot(313, projection='3d')
-hc.plot_surface(X,-Y,cg,cmap=cm.coolwarm,linewidth=0)
+hc.plot_surface(X, -Y, cg, cmap=cm.coolwarm, linewidth=0)
 hc.set_title('g')
 # print(Img.str)
 plt.show()

-return Img.col,cr.flatten(),cb.flatten(),cg.flatten(),(w,h,dx,dy)
+return Img.col, cr.flatten(), cb.flatten(), cg.flatten(), (w, h, dx, dy)

 else:
 """
 only perform calculations for luminance shading
 """
-g = get_16x12_grid(av_ch_g,dx,dy) - Img.blacklevel_16
-cg = np.reshape(1/g,(12,16)).astype('float32')
-cg = cv2.medianBlur(cg,3).astype('float64')
+g = get_16x12_grid(av_ch_g, dx, dy) - Img.blacklevel_16
+cg = np.reshape(1/g, (12, 16)).astype('float32')
+cg = cv2.medianBlur(cg, 3).astype('float64')
 cg = cg/np.min(cg)

 if plot:
-hf = plt.figure(figssize=(8,8))
-ha = hf.add_subplot(1,1,1,projection='3d')
-X,Y = np.meashgrid(range(16),range(12))
-ha.plot_surface(X,-Y,cg,cmap=cm.coolwarm,linewidth=0)
+hf = plt.figure(figssize=(8, 8))
+ha = hf.add_subplot(1, 1, 1, projection='3d')
+X, Y = np.meashgrid(range(16), range(12))
+ha.plot_surface(X, -Y, cg, cmap=cm.coolwarm, linewidth=0)
 ha.set_title('ALSC Plot (Luminance only!)\nImg: {}\n\ncg').format(Img.str)
 plt.show()

-return Img.col,None,None,cg.flatten(),(w,h,dx,dy)
+return Img.col, None, None, cg.flatten(), (w, h, dx, dy)


 """
 Compresses channel down to a 16x12 grid
 """
-def get_16x12_grid(chan,dx,dy):
+def get_16x12_grid(chan, dx, dy):
 grid = []
 """
 since left and bottom border will not necessarily have rectangles of
@@ -208,11 +208,11 @@ def get_16x12_grid(chan,dx,dy):
 """
 for i in range(11):
 for j in range(15):
-grid.append(np.mean(chan[dy*i:dy*(1+i),dx*j:dx*(1+j)]))
-grid.append(np.mean(chan[dy*i:dy*(1+i),15*dx:]))
+grid.append(np.mean(chan[dy*i:dy*(1+i), dx*j:dx*(1+j)]))
+grid.append(np.mean(chan[dy*i:dy*(1+i), 15*dx:]))
 for j in range(15):
-grid.append(np.mean(chan[11*dy:,dx*j:dx*(1+j)]))
-grid.append(np.mean(chan[11*dy:,15*dx:]))
+grid.append(np.mean(chan[11*dy:, dx*j:dx*(1+j)]))
+grid.append(np.mean(chan[11*dy:, 15*dx:]))
 """
 return as np.array, ready for further manipulation
 """
@@ -221,7 +221,7 @@ def get_16x12_grid(chan,dx,dy):
 """
 obtains sigmas for red and blue, effectively a measure of the 'error'
 """
-def get_sigma(Cam,cal_cr_list,cal_cb_list):
+def get_sigma(Cam, cal_cr_list, cal_cb_list):
 Cam.log += '\nCalculating sigmas'
 """
 provided colour alsc tables were generated for two different colour
@@ -239,9 +239,9 @@ def get_sigma(Cam,cal_cr_list,cal_cb_list):
 sigma_rs = []
 sigma_bs = []
 for i in range(len(cts)-1):
-sigma_rs.append(calc_sigma(cal_cr_list[i]['table'],cal_cr_list[i+1]['table']))
-sigma_bs.append(calc_sigma(cal_cb_list[i]['table'],cal_cb_list[i+1]['table']))
-Cam.log += '\nColour temperature interval {} - {} K'.format(cts[i],cts[i+1])
+sigma_rs.append(calc_sigma(cal_cr_list[i]['table'], cal_cr_list[i+1]['table']))
+sigma_bs.append(calc_sigma(cal_cb_list[i]['table'], cal_cb_list[i+1]['table']))
+Cam.log += '\nColour temperature interval {} - {} K'.format(cts[i], cts[i+1])
 Cam.log += '\nSigma red: {}'.format(sigma_rs[-1])
 Cam.log += '\nSigma blue: {}'.format(sigma_bs[-1])

@@ -254,19 +254,19 @@ def get_sigma(Cam,cal_cr_list,cal_cb_list):
 Cam.log += '\nMaximum sigmas: Red = {} Blue = {}'.format(sigma_r, sigma_b)


-# print(sigma_rs,sigma_bs)
-# print(sigma_r,sigma_b)
-return sigma_r,sigma_b
+# print(sigma_rs, sigma_bs)
+# print(sigma_r, sigma_b)
+return sigma_r, sigma_b

 """
 calculate sigma from two adjacent gain tables
 """
-def calc_sigma(g1,g2):
+def calc_sigma(g1, g2):
 """
 reshape into 16x12 matrix
 """
-g1 = np.reshape(g1,(12,16))
-g2 = np.reshape(g2,(12,16))
+g1 = np.reshape(g1, (12, 16))
+g2 = np.reshape(g2, (12, 16))
 """
 apply gains to gain table
 """
@@ -294,4 +294,4 @@ def calc_sigma(g1,g2):
 return mean difference
 """
 mean_diff = np.mean(diffs)
-return(np.round(mean_diff,5))
+return(np.round(mean_diff, 5))
|
|
|
@ -12,7 +12,7 @@ from scipy.optimize import fmin
|
||||||
"""
|
"""
|
||||||
obtain piecewise linear approximation for colour curve
|
obtain piecewise linear approximation for colour curve
|
||||||
"""
|
"""
|
||||||
def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
def awb(Cam, cal_cr_list, cal_cb_list, plot):
|
||||||
imgs = Cam.imgs
|
imgs = Cam.imgs
|
||||||
"""
|
"""
|
||||||
condense alsc calibration tables into one dictionary
|
condense alsc calibration tables into one dictionary
|
||||||
|
@ -21,7 +21,7 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
colour_cals = None
|
colour_cals = None
|
||||||
else:
|
else:
|
||||||
colour_cals = {}
|
colour_cals = {}
|
||||||
for cr,cb in zip(cal_cr_list,cal_cb_list):
|
for cr, cb in zip(cal_cr_list, cal_cb_list):
|
||||||
cr_tab = cr['table']
|
cr_tab = cr['table']
|
||||||
cb_tab = cb['table']
|
cb_tab = cb['table']
|
||||||
"""
|
"""
|
||||||
|
@ -29,7 +29,7 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
"""
|
"""
|
||||||
cr_tab= cr_tab/np.min(cr_tab)
|
cr_tab= cr_tab/np.min(cr_tab)
|
||||||
cb_tab= cb_tab/np.min(cb_tab)
|
cb_tab= cb_tab/np.min(cb_tab)
|
||||||
colour_cals[cr['ct']] = [cr_tab,cb_tab]
|
colour_cals[cr['ct']] = [cr_tab, cb_tab]
|
||||||
"""
|
"""
|
||||||
obtain data from greyscale macbeth patches
|
obtain data from greyscale macbeth patches
|
||||||
"""
|
"""
|
||||||
|
@ -42,17 +42,17 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
Note: if alsc is disabled then colour_cals will be set to None and the
|
Note: if alsc is disabled then colour_cals will be set to None and the
|
||||||
function will just return the greyscale patches
|
function will just return the greyscale patches
|
||||||
"""
|
"""
|
||||||
r_patchs,b_patchs,g_patchs = get_alsc_patches(Img,colour_cals)
|
r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals)
|
||||||
"""
|
"""
|
||||||
calculate ratio of r,b to g
|
calculate ratio of r, b to g
|
||||||
"""
|
"""
|
||||||
r_g = np.mean(r_patchs/g_patchs)
|
r_g = np.mean(r_patchs/g_patchs)
|
||||||
b_g = np.mean(b_patchs/g_patchs)
|
b_g = np.mean(b_patchs/g_patchs)
|
||||||
Cam.log += '\n r : {:.4f} b : {:.4f}'.format(r_g,b_g)
|
Cam.log += '\n r : {:.4f} b : {:.4f}'.format(r_g, b_g)
|
||||||
"""
|
"""
|
||||||
The curve tends to be better behaved in so-called hatspace.
|
The curve tends to be better behaved in so-called hatspace.
|
||||||
R,B,G represent the individual channels. The colour curve is plotted in
|
R, B, G represent the individual channels. The colour curve is plotted in
|
||||||
r,b space, where:
|
r, b space, where:
|
||||||
r = R/G
|
r = R/G
|
||||||
b = B/G
|
b = B/G
|
||||||
This will be referred to as dehatspace... (sorry)
|
This will be referred to as dehatspace... (sorry)
|
||||||
|
@ -71,18 +71,18 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
"""
|
"""
|
||||||
r_g_hat = r_g/(1+r_g+b_g)
|
r_g_hat = r_g/(1+r_g+b_g)
|
||||||
b_g_hat = b_g/(1+r_g+b_g)
|
b_g_hat = b_g/(1+r_g+b_g)
|
||||||
Cam.log += '\n r_hat : {:.4f} b_hat : {:.4f}'.format(r_g_hat,b_g_hat)
|
Cam.log += '\n r_hat : {:.4f} b_hat : {:.4f}'.format(r_g_hat, b_g_hat)
|
||||||
rbs_hat.append((r_g_hat,b_g_hat,Img.col))
|
rbs_hat.append((r_g_hat, b_g_hat, Img.col))
|
||||||
rb_raw.append((r_g,b_g))
|
rb_raw.append((r_g, b_g))
|
||||||
Cam.log += '\n'
|
Cam.log += '\n'
|
||||||
|
|
||||||
Cam.log += '\nFinished processing images'
|
Cam.log += '\nFinished processing images'
|
||||||
"""
|
"""
|
||||||
sort all lits simultaneously by r_hat
|
sort all lits simultaneously by r_hat
|
||||||
"""
|
"""
|
||||||
rbs_zip = list(zip(rbs_hat,rb_raw))
|
rbs_zip = list(zip(rbs_hat, rb_raw))
|
||||||
rbs_zip.sort(key=lambda x:x[0][0])
|
rbs_zip.sort(key=lambda x: x[0][0])
|
||||||
rbs_hat,rb_raw = list(zip(*rbs_zip))
|
rbs_hat, rb_raw = list(zip(*rbs_zip))
|
||||||
"""
|
"""
|
||||||
unzip tuples ready for processing
|
unzip tuples ready for processing
|
||||||
"""
|
"""
|
||||||
|
@ -91,7 +91,7 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
"""
|
"""
|
||||||
fit quadratic fit to r_g hat and b_g_hat
|
fit quadratic fit to r_g hat and b_g_hat
|
||||||
"""
|
"""
|
||||||
a,b,c = np.polyfit(rbs_hat[0],rbs_hat[1],2)
|
a, b, c = np.polyfit(rbs_hat[0], rbs_hat[1], 2)
|
||||||
Cam.log += '\nFit quadratic curve in hatspace'
|
Cam.log += '\nFit quadratic curve in hatspace'
|
||||||
"""
|
"""
|
||||||
the algorithm now approximates the shortest distance from each point to the
|
the algorithm now approximates the shortest distance from each point to the
|
||||||
|
@ -113,11 +113,11 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
def f(x):
|
def f(x):
|
||||||
return a*x**2 + b*x + c
|
return a*x**2 + b*x + c
|
||||||
"""
|
"""
|
||||||
iterate over points (R,B are x and y coordinates of points) and calculate
|
iterate over points (R, B are x and y coordinates of points) and calculate
|
||||||
distance to line in dehatspace
|
distance to line in dehatspace
|
||||||
"""
|
"""
|
||||||
dists = []
|
dists = []
|
||||||
for i, (R,B) in enumerate(zip(rbs_hat[0],rbs_hat[1])):
|
for i, (R, B) in enumerate(zip(rbs_hat[0], rbs_hat[1])):
|
||||||
"""
|
"""
|
||||||
define function to minimise as square distance between datapoint and
|
define function to minimise as square distance between datapoint and
|
||||||
point on curve. Squaring is monotonic so minimising radius squared is
|
point on curve. Squaring is monotonic so minimising radius squared is
|
||||||
|
@ -129,7 +129,7 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
"""
|
"""
|
||||||
perform optimisation with scipy.optmisie.fmin
|
perform optimisation with scipy.optmisie.fmin
|
||||||
"""
|
"""
|
||||||
x_hat = fmin(f_min,R,disp=0)[0]
|
x_hat = fmin(f_min, R, disp=0)[0]
|
||||||
y_hat = f(x_hat)
|
y_hat = f(x_hat)
|
||||||
"""
|
"""
|
||||||
dehat
|
dehat
|
||||||
|
@ -179,16 +179,16 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
"""
|
"""
|
||||||
r_fit = r_hat_fit/(1-r_hat_fit-b_hat_fit)
|
r_fit = r_hat_fit/(1-r_hat_fit-b_hat_fit)
|
||||||
b_fit = b_hat_fit/(1-r_hat_fit-b_hat_fit)
|
b_fit = b_hat_fit/(1-r_hat_fit-b_hat_fit)
|
||||||
c_fit = np.round(rbs_hat[2],0)
|
c_fit = np.round(rbs_hat[2], 0)
|
||||||
"""
|
"""
|
||||||
round to 4dp
|
round to 4dp
|
||||||
"""
|
"""
|
||||||
r_fit = np.where((1000*r_fit)%1<=0.05,r_fit+0.0001,r_fit)
|
r_fit = np.where((1000*r_fit)%1<=0.05, r_fit+0.0001, r_fit)
|
||||||
r_fit = np.where((1000*r_fit)%1>=0.95,r_fit-0.0001,r_fit)
|
r_fit = np.where((1000*r_fit)%1>=0.95, r_fit-0.0001, r_fit)
|
||||||
b_fit = np.where((1000*b_fit)%1<=0.05,b_fit+0.0001,b_fit)
|
b_fit = np.where((1000*b_fit)%1<=0.05, b_fit+0.0001, b_fit)
|
||||||
b_fit = np.where((1000*b_fit)%1>=0.95,b_fit-0.0001,b_fit)
|
b_fit = np.where((1000*b_fit)%1>=0.95, b_fit-0.0001, b_fit)
|
||||||
r_fit = np.round(r_fit,4)
|
r_fit = np.round(r_fit, 4)
|
||||||
b_fit = np.round(b_fit,4)
|
b_fit = np.round(b_fit, 4)
|
||||||
"""
|
"""
|
||||||
The following code ensures that colour temperature decreases with
|
The following code ensures that colour temperature decreases with
|
||||||
increasing r/g
|
increasing r/g
|
||||||
|
@ -200,8 +200,8 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
while i > 0 :
|
while i > 0 :
|
||||||
if c_fit[i] > c_fit[i-1]:
|
if c_fit[i] > c_fit[i-1]:
|
||||||
Cam.log += '\nColour temperature increase found\n'
|
Cam.log += '\nColour temperature increase found\n'
|
||||||
Cam.log += '{} K at r = {} to '.format(c_fit[i-1],r_fit[i-1])
|
Cam.log += '{} K at r = {} to '.format(c_fit[i-1], r_fit[i-1])
|
||||||
Cam.log += '{} K at r = {}'.format(c_fit[i],r_fit[i])
|
Cam.log += '{} K at r = {}'.format(c_fit[i], r_fit[i])
|
||||||
"""
|
"""
|
||||||
if colour temperature increases then discard point furthest from
|
if colour temperature increases then discard point furthest from
|
||||||
the transformed fit (dehatspace)
|
the transformed fit (dehatspace)
|
||||||
|
@ -209,8 +209,8 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
error_1 = abs(dists[i-1])
|
error_1 = abs(dists[i-1])
|
||||||
error_2 = abs(dists[i])
|
error_2 = abs(dists[i])
|
||||||
Cam.log += '\nDistances from fit:\n'
|
Cam.log += '\nDistances from fit:\n'
|
||||||
Cam.log += '{} K : {:.5f} , '.format(c_fit[i],error_1)
|
Cam.log += '{} K : {:.5f} , '.format(c_fit[i], error_1)
|
||||||
Cam.log += '{} K : {:.5f}'.format(c_fit[i-1],error_2)
|
Cam.log += '{} K : {:.5f}'.format(c_fit[i-1], error_2)
|
||||||
"""
|
"""
|
||||||
find bad index
|
find bad index
|
||||||
note that in python false = 0 and true = 1
|
note that in python false = 0 and true = 1
|
||||||
|
@ -221,9 +221,9 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
"""
|
"""
|
||||||
delete bad point
|
delete bad point
|
||||||
"""
|
"""
|
||||||
r_fit = np.delete(r_fit,bad)
|
r_fit = np.delete(r_fit, bad)
|
||||||
b_fit = np.delete(b_fit,bad)
|
b_fit = np.delete(b_fit, bad)
|
||||||
c_fit = np.delete(c_fit,bad).astype(np.uint16)
|
c_fit = np.delete(c_fit, bad).astype(np.uint16)
|
||||||
"""
|
"""
|
||||||
note that if a point has been discarded then the length has decreased
|
note that if a point has been discarded then the length has decreased
|
||||||
by one, meaning that decreasing the index by one will reassess the kept
|
by one, meaning that decreasing the index by one will reassess the kept
|
||||||
|
@ -235,7 +235,7 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
"""
|
"""
|
||||||
return formatted ct curve, ordered by increasing colour temperature
|
return formatted ct curve, ordered by increasing colour temperature
|
||||||
"""
|
"""
|
||||||
ct_curve = list(np.array(list(zip(b_fit,r_fit,c_fit))).flatten())[::-1]
|
ct_curve = list(np.array(list(zip(b_fit, r_fit, c_fit))).flatten())[::-1]
|
||||||
Cam.log += '\nFinal CT curve:'
|
Cam.log += '\nFinal CT curve:'
|
||||||
for i in range(len(ct_curve)//3):
|
for i in range(len(ct_curve)//3):
|
||||||
j = 3*i
|
j = 3*i
|
||||||
|
@ -247,15 +247,15 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
plotting code for debug
|
plotting code for debug
|
||||||
"""
|
"""
|
||||||
if plot:
|
if plot:
|
||||||
x = np.linspace(np.min(rbs_hat[0]),np.max(rbs_hat[0]),100)
|
x = np.linspace(np.min(rbs_hat[0]), np.max(rbs_hat[0]), 100)
|
||||||
y = a*x**2 + b*x + c
|
y = a*x**2 + b*x + c
|
||||||
plt.subplot(2,1,1)
|
plt.subplot(2, 1, 1)
|
||||||
plt.title('hatspace')
|
plt.title('hatspace')
|
||||||
plt.plot(rbs_hat[0],rbs_hat[1],ls='--',color='blue')
|
plt.plot(rbs_hat[0], rbs_hat[1], ls='--', color='blue')
|
||||||
plt.plot(x,y,color='green',ls='-')
|
plt.plot(x, y, color='green', ls='-')
|
||||||
plt.scatter(rbs_hat[0],rbs_hat[1],color='red')
|
plt.scatter(rbs_hat[0], rbs_hat[1], color='red')
|
||||||
for i, ct in enumerate(rbs_hat[2]):
|
for i, ct in enumerate(rbs_hat[2]):
|
||||||
plt.annotate(str(ct),(rbs_hat[0][i],rbs_hat[1][i]))
|
plt.annotate(str(ct), (rbs_hat[0][i], rbs_hat[1][i]))
|
||||||
plt.xlabel('$\hat{r}$')
|
plt.xlabel('$\hat{r}$')
|
||||||
plt.ylabel('$\hat{b}$')
|
plt.ylabel('$\hat{b}$')
|
||||||
"""
|
"""
|
||||||
|
@ -265,13 +265,13 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
# ax = plt.gca()
|
# ax = plt.gca()
|
||||||
# ax.set_aspect('equal')
|
# ax.set_aspect('equal')
|
||||||
plt.grid()
|
plt.grid()
|
||||||
plt.subplot(2,1,2)
|
plt.subplot(2, 1, 2)
|
||||||
plt.title('dehatspace - indoors?')
|
plt.title('dehatspace - indoors?')
|
||||||
plt.plot(r_fit,b_fit,color='blue')
|
plt.plot(r_fit, b_fit, color='blue')
|
||||||
plt.scatter(rb_raw[0],rb_raw[1],color='green')
|
plt.scatter(rb_raw[0], rb_raw[1], color='green')
|
||||||
plt.scatter(r_fit,b_fit,color='red')
|
plt.scatter(r_fit, b_fit, color='red')
|
||||||
for i,ct in enumerate(c_fit):
|
for i, ct in enumerate(c_fit):
|
||||||
plt.annotate(str(ct),(r_fit[i],b_fit[i]))
|
plt.annotate(str(ct), (r_fit[i], b_fit[i]))
|
||||||
plt.xlabel('$r$')
|
plt.xlabel('$r$')
|
||||||
plt.ylabel('$b$')
|
plt.ylabel('$b$')
|
||||||
"""
|
"""
|
||||||
|
@ -286,15 +286,15 @@ def awb(Cam,cal_cr_list,cal_cb_list,plot):
|
||||||
"""
|
"""
|
||||||
end of plotting code
|
end of plotting code
|
||||||
"""
|
"""
|
||||||
return(ct_curve,np.round(transverse_pos,5),np.round(transverse_neg,5))
|
return(ct_curve, np.round(transverse_pos, 5), np.round(transverse_neg, 5))
|
||||||
|
|
||||||
"""
|
"""
|
||||||
obtain greyscale patches and perform alsc colour correction
|
obtain greyscale patches and perform alsc colour correction
|
||||||
"""
|
"""
|
||||||
def get_alsc_patches(Img,colour_cals,grey=True):
|
def get_alsc_patches(Img, colour_cals, grey=True):
|
||||||
"""
|
"""
|
||||||
get patch centre coordinates, image colour and the actual
|
get patch centre coordinates, image colour and the actual
patches for each channel, remembering to subtract blacklevel
If grey then only greyscale patches considered
"""
if grey:

@ -316,12 +316,12 @@ def get_alsc_patches(Img,colour_cals,grey=True):
g_patchs = (patches[1]+patches[2])/2 - Img.blacklevel_16

if colour_cals == None:
return r_patchs, b_patchs, g_patchs
"""
find where image colour fits in alsc colour calibration tables
"""
cts = list(colour_cals.keys())
pos = bisect_left(cts, col)
"""
if img colour is below minimum or above maximum alsc calibration colour, simply
pick extreme closest to img colour

@ -343,32 +343,32 @@ def get_alsc_patches(Img,colour_cals,grey=True):
bef_tabs = np.array(colour_cals[bef])
aft_tabs = np.array(colour_cals[aft])
col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
col_tabs = np.reshape(col_tabs, (2, 12, 16))
"""
calculate dx, dy used to calculate alsc table
"""
w, h = Img.w/2, Img.h/2
dx, dy = int(-(-(w-1)//16)), int(-(-(h-1)//12))
"""
make list of pairs of gains for each patch by selecting the correct value
in alsc colour calibration table
"""
patch_gains = []
for cen in cen_coords:
x, y = cen[0]//dx, cen[1]//dy
# We could probably do with some better spatial interpolation here?
col_gains = (col_tabs[0][y][x], col_tabs[1][y][x])
patch_gains.append(col_gains)

"""
multiply the r and b channels in each patch by the respective gain, finally
performing the alsc colour correction
"""
for i, gains in enumerate(patch_gains):
r_patchs[i] = r_patchs[i] * gains[0]
b_patchs[i] = b_patchs[i] * gains[1]

"""
return greyscale patches, g channel and correct r, b channels
"""
return r_patchs, b_patchs, g_patchs
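For readers following the interpolation above, here is a minimal, self-contained sketch of the same colour-temperature blending. It is an illustration, not the tool's code: cal_tables is a hypothetical dict mapping colour temperature to a flattened pair of r/b gain tables, and da/db are defined here as distances to the neighbouring calibration points (the tool defines them outside the hunk shown).

from bisect import bisect_left
import numpy as np

def interpolate_alsc_tables(cal_tables, col):
    """Blend the two calibration tables nearest to colour temperature col."""
    cts = sorted(cal_tables.keys())
    pos = bisect_left(cts, col)
    # clamp to the extremes if col lies outside the calibrated range
    if pos == 0:
        tabs = np.array(cal_tables[cts[0]])
    elif pos == len(cts):
        tabs = np.array(cal_tables[cts[-1]])
    else:
        bef, aft = cts[pos-1], cts[pos]
        da = col - bef          # distance to the lower calibration point
        db = aft - col          # distance to the upper calibration point
        bef_tabs = np.array(cal_tables[bef])
        aft_tabs = np.array(cal_tables[aft])
        # the closer table gets the larger weight, as in the snippet above
        tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
    return np.reshape(tabs, (2, 12, 16))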
@ -19,7 +19,7 @@ def degamma(x):
"""
Finds colour correction matrices for list of images
"""
def ccm(Cam, cal_cr_list, cal_cb_list):
imgs = Cam.imgs
"""
standard macbeth chart colour values

@ -32,7 +32,7 @@ def ccm(Cam,cal_cr_list,cal_cb_list):
[130, 128, 176], # blue flower
[92, 190, 172], # bluish green
[224, 124, 47], # orange
[68, 91, 170], # purplish blue
[198, 82, 97], # moderate red
[94, 58, 106], # purple
[159, 189, 63], # yellow green

@ -58,7 +58,7 @@ def ccm(Cam,cal_cr_list,cal_cb_list):
"""
reorder reference values to match how patches are ordered
"""
m_srgb = np.array([m_srgb[i::6] for i in range(6)]).reshape((24, 3))

"""
reformat alsc correction tables or set colour_cals to None if alsc is

@ -68,7 +68,7 @@ def ccm(Cam,cal_cr_list,cal_cb_list):
colour_cals = None
else:
colour_cals = {}
for cr, cb in zip(cal_cr_list, cal_cb_list):
cr_tab = cr['table']
cb_tab = cb['table']
"""

@ -76,7 +76,7 @@ def ccm(Cam,cal_cr_list,cal_cb_list):
"""
cr_tab= cr_tab/np.min(cr_tab)
cb_tab= cb_tab/np.min(cb_tab)
colour_cals[cr['ct']] = [cr_tab, cb_tab]

"""
for each image, perform awb and alsc corrections.

@ -91,14 +91,14 @@ def ccm(Cam,cal_cr_list,cal_cb_list):
Note: if alsc is disabled then colour_cals will be set to None and
the function will simply return the macbeth patches
"""
r, b, g = get_alsc_patches(Img, colour_cals, grey=False)
"""
do awb
Note: awb is done by measuring the macbeth chart in the image, rather
than from the awb calibration. This is done so the awb will be perfect
and the ccm matrices will be more accurate.
"""
r_greys, b_greys, g_greys = r[3::4], b[3::4], g[3::4]
r_g = np.mean(r_greys/g_greys)
b_g = np.mean(b_greys/g_greys)
r = r / r_g

@ -108,16 +108,16 @@ def ccm(Cam,cal_cr_list,cal_cb_list):
normalise brightness wrt reference macbeth colours and then average
each channel for each patch
"""
gain = np.mean(m_srgb)/np.mean((r, g, b))
Cam.log += '\nGain with respect to standard colours: {:.3f}'.format(gain)
r = np.mean(gain*r, axis=1)
b = np.mean(gain*b, axis=1)
g = np.mean(gain*g, axis=1)

"""
calculate ccm matrix
"""
ccm = do_ccm(r, g, b, m_srgb)

"""
if a ccm has already been calculated for that temperature then don't

@ -133,18 +133,18 @@ def ccm(Cam,cal_cr_list,cal_cb_list):
"""
average any ccms that share a colour temperature
"""
for k, v in ccm_tab.items():
tab = np.mean(v, axis=0)
tab = np.where((10000*tab)%1<=0.05, tab+0.00001, tab)
tab = np.where((10000*tab)%1>=0.95, tab-0.00001, tab)
ccm_tab[k] = list(np.round(tab, 5))
Cam.log += '\nMatrix calculated for colour temperature of {} K'.format(k)

"""
return all ccms with respective colour temperature in the correct format,
sorted by their colour temperature
"""
sorted_ccms = sorted(ccm_tab.items(), key=lambda kv: kv[0])
ccms = []
for i in sorted_ccms:
ccms.append({

@ -161,19 +161,19 @@ calculation.
Should you want to fit them in another space (e.g. LAB) we wish you the best of
luck and send us the code when you are done! :-)
"""
def do_ccm(r, g, b, m_srgb):
rb = r-b
gb = g-b
rb_2s = (rb*rb)
rb_gbs = (rb*gb)
gb_2s = (gb*gb)

r_rbs = ( rb * (m_srgb[..., 0] - b) )
r_gbs = ( gb * (m_srgb[..., 0] - b) )
g_rbs = ( rb * (m_srgb[..., 1] - b) )
g_gbs = ( gb * (m_srgb[..., 1] - b) )
b_rbs = ( rb * (m_srgb[..., 2] - b) )
b_gbs = ( gb * (m_srgb[..., 2] - b) )

"""
Obtain least squares fit

@ -216,6 +216,6 @@ def do_ccm(r,g,b,m_srgb):
"""
format ccm
"""
ccm = [r_a, r_b, r_c, g_a, g_b, g_c, b_a, b_b, b_c]

return ccm
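The least-squares step itself sits outside this hunk. As a hedged illustration only (not the tool's exact derivation, which works in the offset-subtracted rb/gb terms set up above), a plain unconstrained fit of a 3x3 matrix onto the reference patches could look like this:

import numpy as np

def fit_ccm_lstsq(r, g, b, m_srgb):
    """Unconstrained least-squares fit of a 3x3 matrix mapping measured
    per-patch averages (r, g, b) onto the reference colours m_srgb."""
    measured = np.stack([r, g, b], axis=1)          # shape (24, 3)
    # solve measured @ M.T ~= m_srgb in the least-squares sense
    m_t, *_ = np.linalg.lstsq(measured, m_srgb, rcond=None)
    ccm = m_t.T.flatten()                           # row-major, like the list above
    return list(np.round(ccm, 5))

Note this plain fit ignores any constraints the tool may apply; it is only meant to make the shape of the problem concrete.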
@ -13,25 +13,25 @@ Uses green differences in macbeth patches to fit green equalisation threshold
model. Ideally, all macbeth chart centres would fall below the threshold as
these should be corrected by geq.
"""
def geq_fit(Cam, plot):
imgs = Cam.imgs
"""
green equalisation to mitigate mazing.
Fits geq model by looking at difference
between greens in macbeth patches
"""
geqs = np.array([ geq(Cam, Img)*Img.againQ8_norm for Img in imgs ])
Cam.log += '\nProcessed all images'
geqs = geqs.reshape((-1, 2))
"""
data is sorted by green difference and top half is selected since higher
green difference data define the decision boundary.
"""
geqs = np.array(sorted(geqs, key = lambda r: np.abs((r[1]-r[0])/r[0])))

length = len(geqs)
g0 = geqs[length//2:, 0]
g1 = geqs[length//2:, 1]
gdiff = np.abs(g0-g1)
"""
find linear fit by minimising asymmetric least square errors

@ -40,7 +40,7 @@ def geq_fit(Cam,plot):
threshold, hence the upper bound approach
"""
def f(params):
m, c = params
a = gdiff - (m*g0+c)
"""
asymmetric square error returns:

@ -49,29 +49,29 @@ def geq_fit(Cam,plot):
"""
return(np.sum(a**2+0.95*np.abs(a)*a))
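Pulling the pieces of this hunk together, a self-contained sketch of the asymmetric "upper bound" fit, including the plain linear fallback used further down, might look as follows. g0 and gdiff are the arrays built above; the 0.95 weighting and the [0.01, 500] initial guess are taken from the code itself.

import numpy as np
from scipy import optimize

def fit_geq_threshold(g0, gdiff):
    """Fit gdiff ~= slope*g0 + offset, penalising points above the line far
    more than points below it, so the line acts as an upper bound."""
    def cost(params):
        m, c = params
        a = gdiff - (m*g0 + c)
        # a**2 + 0.95*|a|*a is ~1.95*a**2 above the line and ~0.05*a**2 below it
        return np.sum(a**2 + 0.95*np.abs(a)*a)
    result = optimize.minimize(cost, [0.01, 500], method='Nelder-Mead')
    if result.success:
        slope, offset = result.x
    else:
        # fall back to an ordinary linear fit, inverted to the same slope/offset form
        fit = np.polyfit(gdiff, g0, 1)
        offset, slope = -fit[1]/fit[0], 1/fit[0]
    return slope, offset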

initial_guess = [0.01, 500]
"""
Nelder-Mead is usually not the most desirable optimisation method
but has been chosen here due to its robustness to undifferentiability
(is that a word?)
"""
result = optimize.minimize(f, initial_guess, method='Nelder-Mead')
"""
need to check if the fit worked correctly
"""
if result.success:
slope, offset = result.x
Cam.log += '\nFit result: slope = {:.5f} '.format(slope)
Cam.log += 'offset = {}'.format(int(offset))
"""
optional plotting code
"""
if plot:
x = np.linspace(max(g0)*1.1, 100)
y = slope*x + offset
plt.title('GEQ Asymmetric \'Upper Bound\' Fit')
plt.plot(x, y, color='red', ls='--', label='fit')
plt.scatter(g0, gdiff, color='b', label='data')
plt.ylabel('Difference in green channels')
plt.xlabel('Green value')

@ -103,7 +103,7 @@ def geq_fit(Cam,plot):
"""
if plot:
y2 = slope*x + offset
plt.plot(x, y2, color='green', ls='--', label='scaled fit')
plt.grid()
plt.legend()
plt.show()

@ -122,19 +122,19 @@ def geq_fit(Cam,plot):
print(result.message)
Cam.log += '\nWARNING: Asymmetric least squares fit failed! '
Cam.log += 'Standard fit used could possibly lead to worse results'
fit = np.polyfit(gdiff, g0, 1)
offset, slope = -fit[1]/fit[0], 1/fit[0]
Cam.log += '\nFit result: slope = {:.5f} '.format(slope)
Cam.log += 'offset = {}'.format(int(offset))
"""
optional plotting code
"""
if plot:
x = np.linspace(max(g0)*1.1, 100)
y = slope*x + offset
plt.title('GEQ Linear Fit')
plt.plot(x, y, color='red', ls='--', label='fit')
plt.scatter(g0, gdiff, color='b', label='data')
plt.ylabel('Difference in green channels')
plt.xlabel('Green value')
"""

@ -158,22 +158,22 @@ def geq_fit(Cam,plot):
"""
if plot:
y2 = slope*x + offset
plt.plot(x, y2, color='green', ls='--', label='scaled fit')
plt.legend()
plt.grid()
plt.show()

return round(slope, 5), int(offset)

""""
Return green channels of macbeth patches
returns g0, g1 where
> g0 is green next to red
> g1 is green next to blue
"""
def geq(Cam, Img):
Cam.log += '\nProcessing image {}'.format(Img.name)
patches = [Img.patches[i] for i in Img.order][1:3]
g_patches = np.array([(np.mean(patches[0][i]), np.mean(patches[1][i])) for i in range(24)])
Cam.log += '\n'
return(g_patches)

@ -18,7 +18,7 @@ Once image is extracted from data, it finds 24 16x16 patches for each
channel, centred at the macbeth chart squares
"""
class Image:
def __init__(self, buf):
self.buf = buf
self.patches = None
self.saturated = False

@ -45,11 +45,11 @@ class Image:
Channel order depending on bayer pattern
"""
bayer_case = {
0 : (0, 1, 2, 3), #red
1 : (2, 0, 3, 1), #green next to red
2 : (3, 2, 1, 0), #green next to blue
3 : (1, 0, 3, 2), #blue
128 : (0, 1, 2, 3) #arbitrary order for greyscale case
}
self.order = bayer_case[self.pattern]

@ -87,7 +87,7 @@ class Image:
"""
get image from raw scanline data
"""
def get_image(self, raw):
self.dptr = []
"""
check if data is 10 or 12 bits

@ -100,31 +100,31 @@ class Image:
"""
stack scan lines into matrix
"""
raw = np.array(raw).reshape(-1, lin_len).astype(np.int64)[:self.h, ...]
"""
separate 5 bits in each package, stopping when w is satisfied
"""
ba0 = raw[..., 0:5*((self.w+3)>>2):5]
ba1 = raw[..., 1:5*((self.w+3)>>2):5]
ba2 = raw[..., 2:5*((self.w+3)>>2):5]
ba3 = raw[..., 3:5*((self.w+3)>>2):5]
ba4 = raw[..., 4:5*((self.w+3)>>2):5]
"""
assemble 10 bit numbers
"""
ch0 = np.left_shift((np.left_shift(ba0, 2) + (ba4%4)), 6)
ch1 = np.left_shift((np.left_shift(ba1, 2) + (np.right_shift(ba4, 2)%4)), 6)
ch2 = np.left_shift((np.left_shift(ba2, 2) + (np.right_shift(ba4, 4)%4)), 6)
ch3 = np.left_shift((np.left_shift(ba3, 2) + (np.right_shift(ba4, 6)%4)), 6)
"""
interleave bits
"""
mat = np.empty((self.h, self.w), dtype=ch0.dtype)

mat[..., 0::4] = ch0
mat[..., 1::4] = ch1
mat[..., 2::4] = ch2
mat[..., 3::4] = ch3

"""
There is some leaking memory somewhere in the code. This code here

@ -132,25 +132,25 @@ class Image:
reasonable numbers of images, however this is technically just a
workaround. (sorry)
"""
ba0, ba1, ba2, ba3, ba4 = None, None, None, None, None
del ba0, ba1, ba2, ba3, ba4
ch0, ch1, ch2, ch3 = None, None, None, None
del ch0, ch1, ch2, ch3
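A small round-trip check of the 10-bit arithmetic above, with made-up pixel values: it packs four pixels into the 4+1 byte layout the unpacking expects and recovers them with the same shifts, scaled up to 16 bits.

import numpy as np

# four hypothetical 10-bit pixel values
pix = np.array([1023, 512, 3, 768], dtype=np.int64)

# pack them as one byte of high bits per pixel, plus a fifth byte holding
# the four 2-bit remainders (pixel 0 in the lowest two bits, and so on)
high = pix >> 2
low = pix & 3
fifth = low[0] | (low[1] << 2) | (low[2] << 4) | (low[3] << 6)
ba0, ba1, ba2, ba3, ba4 = high[0], high[1], high[2], high[3], fifth

# identical arithmetic to the 10-bit branch above, scaled up to 16 bits
ch0 = np.left_shift((np.left_shift(ba0, 2) + (ba4 % 4)), 6)
ch1 = np.left_shift((np.left_shift(ba1, 2) + (np.right_shift(ba4, 2) % 4)), 6)
ch2 = np.left_shift((np.left_shift(ba2, 2) + (np.right_shift(ba4, 4) % 4)), 6)
ch3 = np.left_shift((np.left_shift(ba3, 2) + (np.right_shift(ba4, 6) % 4)), 6)

# shifting back down by 6 bits recovers the original 10-bit values
assert [int(c) >> 6 for c in (ch0, ch1, ch2, ch3)] == [1023, 512, 3, 768]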

"""
same as before but 12 bit case
"""
elif self.sigbits == 12:
lin_len = ((((((self.w+self.pad+1)>>1)) * 3)+31)>>5) * 32
raw = np.array(raw).reshape(-1, lin_len).astype(np.int64)[:self.h, ...]
ba0 = raw[..., 0:3*((self.w+1)>>1):3]
ba1 = raw[..., 1:3*((self.w+1)>>1):3]
ba2 = raw[..., 2:3*((self.w+1)>>1):3]
ch0 = np.left_shift((np.left_shift(ba0, 4) + ba2%16), 4)
ch1 = np.left_shift((np.left_shift(ba1, 4) + (np.right_shift(ba2, 4))%16), 4)
mat = np.empty((self.h, self.w), dtype=ch0.dtype)
mat[..., 0::2] = ch0
mat[..., 1::2] = ch1

else:
"""

@ -162,21 +162,21 @@ class Image:
"""
separate bayer channels
"""
c0 = mat[0::2, 0::2]
c1 = mat[0::2, 1::2]
c2 = mat[1::2, 0::2]
c3 = mat[1::2, 1::2]
self.channels = [c0, c1, c2, c3]
return 1

"""
obtain 16x16 patch centred at macbeth square centre for each channel
"""
def get_patches(self, cen_coords, size=16):
"""
obtain channel widths and heights
"""
ch_w, ch_h = self.w, self.h
cen_coords = list(np.array((cen_coords[0])).astype(np.int32))
self.cen_coords = cen_coords
"""

@ -184,10 +184,10 @@ class Image:
left to right. Some useful patch indices:
white = 3
black = 23
'reds' = 9, 10
'blues' = 2, 5, 8, 20, 22
'greens' = 6, 12, 17
greyscale = 3, 7, 11, 15, 19, 23
"""
all_patches = []
for ch in self.channels:

@ -199,7 +199,7 @@ class Image:
Patch pixels are sorted by pixel brightness so spatial
information is lost.
'''
patch = ch[cen[1]-7:cen[1]+9, cen[0]-7:cen[0]+9].flatten()
patch.sort()
if patch[-5] == (2**self.sigbits-1)*2**(16-self.sigbits):
self.saturated = True
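For reference, the per-channel patch handling shown in this hunk can be collected into one hedged helper; cen is assumed to lie far enough from the image border for the 16x16 window to fit, and sigbits mirrors the attribute used above.

import numpy as np

def extract_patch(ch, cen, sigbits=12):
    """Take a 16x16 window centred on a macbeth square, sort its pixels by
    brightness, and flag saturation if the 5th-brightest pixel hits the
    sensor maximum (scaled to 16 bits as in the class above)."""
    patch = ch[cen[1]-7:cen[1]+9, cen[0]-7:cen[0]+9].flatten()
    patch = np.sort(patch)
    saturated = bool(patch[-5] == (2**sigbits - 1) * 2**(16 - sigbits))
    return patch, saturated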
@ -218,7 +218,7 @@ def brcm_load_image(Cam, im_str):
"""
create byte array
"""
with open(im_str, 'rb') as image:
f = image.read()
b = bytearray(f)
"""

@ -249,7 +249,7 @@ def brcm_load_image(Cam, im_str):
"""
note index is divided by two to go from string to hex
"""
indices = [m.start()//2 for m in re.finditer(match_str, b_str)]
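A tiny, self-contained illustration of the index-halving trick described here, using a made-up buffer and marker purely for illustration (the real match_str and b_str are built elsewhere in brcm_load_image):

import re

data = bytearray(b'\x00\x11BRCM\x00\x00\x12BRCM')   # hypothetical buffer with two markers
b_str = data.hex()                                  # two hex characters per byte
match_str = bytes('BRCM', 'utf-8').hex()            # marker rendered the same way
# m.start() indexes into the hex string, so halve it to get a byte offset
indices = [m.start()//2 for m in re.finditer(match_str, b_str)]
print(indices)   # -> [2, 9]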
# print(indices)
try:
start = indices[0] + 3

@ -325,10 +325,10 @@ def dng_load_image(Cam, im_str):
raw_im = raw.imread(im_str)
raw_data = raw_im.raw_image
shift = 16 - Img.sigbits
c0 = np.left_shift(raw_data[0::2, 0::2].astype(np.int64), shift)
c1 = np.left_shift(raw_data[0::2, 1::2].astype(np.int64), shift)
c2 = np.left_shift(raw_data[1::2, 0::2].astype(np.int64), shift)
c3 = np.left_shift(raw_data[1::2, 1::2].astype(np.int64), shift)
Img.channels = [c0, c1, c2, c3]

except:

@ -347,7 +347,7 @@ check correct filetype
mac boolean is true if image is expected to contain macbeth chart and false
if not (alsc images don't have macbeth charts)
'''
def load_image(Cam, im_str, mac_config=None, show=False, mac=True, show_meta=False):
"""
check image is correct filetype
"""

@ -363,7 +363,7 @@ def load_image(Cam,im_str,mac_config=None,show=False,mac=True,show_meta=False):
"""
find macbeth centres, discarding images that are too dark or light
"""
av_chan = (np.mean(np.array(Img.channels), axis=0)/(2**16))
av_val = np.mean(av_chan)
# print(av_val)
if av_val < Img.blacklevel_16/(2**16)+1/64:

@ -371,7 +371,7 @@ def load_image(Cam,im_str,mac_config=None,show=False,mac=True,show_meta=False):
print('\nError: Image too dark!')
Cam.log += '\nWARNING: Image too dark!'
else:
macbeth = find_macbeth(Cam, av_chan, mac_config)

"""
if no macbeth found return error

@ -405,8 +405,8 @@ def load_image(Cam,im_str,mac_config=None,show=False,mac=True,show_meta=False):
"""
if show and __name__ == '__main__':
copy = sum(Img.channels)/2**18
copy = np.reshape(copy, (Img.h//2, Img.w//2)).astype(np.float64)
copy, _ = reshape(copy, 800)
represent(copy)

return Img

@ -8,7 +8,7 @@ from ctt_tools import *
"""
Find lux values from metadata and calculate Y
"""
def lux(Cam, Img):
shutter_speed = Img.exposure
gain = Img.againQ8_norm
aperture = 1

@ -17,12 +17,12 @@ def lux(Cam,Img):
Cam.log += '\nAperture = {}'.format(aperture)
patches = [Img.patches[i] for i in Img.order]
channels = [Img.channels[i] for i in Img.order]
return lux_calc(Cam, Img, patches, channels), shutter_speed, gain

"""
perform lux calibration on bayer channels
"""
def lux_calc(Cam, Img, patches, channels):
"""
find means color channels on grey patches
"""

@ -30,14 +30,14 @@ def lux_calc(Cam,Img,patches,channels):
ap_g = (np.mean(patches[1][3::4])+np.mean(patches[2][3::4]))/2
ap_b = np.mean(patches[3][3::4])
Cam.log += '\nAverage channel values on grey patches:'
Cam.log += '\nRed = {:.0f} Green = {:.0f} Blue = {:.0f}'.format(ap_r, ap_b, ap_g)
# print(ap_r, ap_g, ap_b)
"""
calculate channel gains
"""
gr = ap_g/ap_r
gb = ap_g/ap_b
Cam.log += '\nChannel gains: Red = {:.3f} Blue = {:.3f}'.format(gr, gb)

"""
find means color channels on image and scale by gain

@ -47,8 +47,8 @@ def lux_calc(Cam,Img,patches,channels):
a_g = (np.mean(channels[1])+np.mean(channels[2]))/2
a_b = np.mean(channels[3])*gb
Cam.log += '\nAverage channel values over entire image scaled by channel gains:'
Cam.log += '\nRed = {:.0f} Green = {:.0f} Blue = {:.0f}'.format(a_r, a_b, a_g)
# print(a_r, a_g, a_b)
"""
Calculate y with top row of yuv matrix
"""
@ -19,15 +19,15 @@ the clustering algorithm. This catches these warnings so they don't flood the
output to the console
"""
def fxn():
warnings.warn("runtime", RuntimeWarning)

"""
Define the success message
"""
success_msg = 'Macbeth chart located successfully'

def find_macbeth(Cam, img, mac_config=(0, 0)):
small_chart, show = mac_config
print('Locating macbeth chart')
Cam.log += '\nLocating macbeth chart'
"""

@ -40,20 +40,20 @@ def find_macbeth(Cam,img,mac_config=(0,0)):
Reference macbeth chart is created that will be correlated with the located
macbeth chart guess to produce a confidence value for the match.
"""
ref = cv2.imread(Cam.path +'ctt_ref.pgm', flags=cv2.IMREAD_GRAYSCALE)
ref_w = 120
ref_h = 80
rc1 = (0, 0)
rc2 = (0, ref_h)
rc3 = (ref_w, ref_h)
rc4 = (ref_w, 0)
ref_corns = np.array((rc1, rc2, rc3, rc4), np.float32)
ref_data = (ref, ref_w, ref_h, ref_corns)

"""
locate macbeth chart
"""
cor, mac, coords, msg = get_macbeth_chart(img, ref_data)

"""
following bits of code tries to fix common problems with simple

@ -68,20 +68,20 @@ def find_macbeth(Cam,img,mac_config=(0,0)):
"""
if cor < 0.75:
a = 2
img_br = cv2.convertScaleAbs(img, alpha=a, beta=0)
cor_b, mac_b, coords_b, msg_b = get_macbeth_chart(img_br, ref_data)
if cor_b > cor:
cor, mac, coords, msg = cor_b, mac_b, coords_b, msg_b

"""
brighten image 4x
"""
if cor < 0.75:
a = 4
img_br = cv2.convertScaleAbs(img, alpha=a, beta=0)
cor_b, mac_b, coords_b, msg_b = get_macbeth_chart(img_br, ref_data)
if cor_b > cor:
cor, mac, coords, msg = cor_b, mac_b, coords_b, msg_b

"""
In case macbeth chart is too small, take a selection of the image and

@ -115,7 +115,7 @@ def find_macbeth(Cam,img,mac_config=(0,0)):
get size of image
"""
shape = list(img.shape[:2])
w, h = shape
"""
set dimensions of the subselection and the step along each axis between
selections

@ -129,9 +129,9 @@ def find_macbeth(Cam,img,mac_config=(0,0)):
"""
for i in range(3):
for j in range(3):
w_s, h_s = i*w_inc, j*h_inc
img_sel = img[w_s:w_s+w_sel, h_s:h_s+h_sel]
cor_ij, mac_ij, coords_ij, msg_ij = get_macbeth_chart(img_sel, ref_data)
"""
if the correlation is better than the best then record the
scale and current subselection at which macbeth chart was

@ -139,9 +139,9 @@ def find_macbeth(Cam,img,mac_config=(0,0)):
"""
if cor_ij > cor:
cor = cor_ij
mac, coords, msg = mac_ij, coords_ij, msg_ij
ii, jj = i, j
w_best, h_best = w_inc, h_inc
d_best = 1


@ -151,21 +151,21 @@ def find_macbeth(Cam,img,mac_config=(0,0)):
if cor < 0.75:
imgs = []
shape = list(img.shape[:2])
w, h = shape
w_sel = int(w/2)
h_sel = int(h/2)
w_inc = int(w/8)
h_inc = int(h/8)
for i in range(5):
for j in range(5):
w_s, h_s = i*w_inc, j*h_inc
img_sel = img[w_s:w_s+w_sel, h_s:h_s+h_sel]
cor_ij, mac_ij, coords_ij, msg_ij = get_macbeth_chart(img_sel, ref_data)
if cor_ij > cor:
cor = cor_ij
mac, coords, msg = mac_ij, coords_ij, msg_ij
ii, jj = i, j
w_best, h_best = w_inc, h_inc
d_best = 2

"""

@ -183,41 +183,41 @@ def find_macbeth(Cam,img,mac_config=(0,0)):
if cor < 0.75 and d_best > 1 :
imgs = []
shape = list(img.shape[:2])
w, h = shape
w_sel = int(w/3)
h_sel = int(h/3)
w_inc = int(w/12)
h_inc = int(h/12)
for i in range(9):
for j in range(9):
w_s, h_s = i*w_inc, j*h_inc
img_sel = img[w_s:w_s+w_sel, h_s:h_s+h_sel]
cor_ij, mac_ij, coords_ij, msg_ij = get_macbeth_chart(img_sel, ref_data)
if cor_ij > cor:
cor = cor_ij
mac, coords, msg = mac_ij, coords_ij, msg_ij
ii, jj = i, j
w_best, h_best = w_inc, h_inc
d_best = 3

if cor < 0.75 and d_best > 2:
imgs = []
shape = list(img.shape[:2])
w, h = shape
w_sel = int(w/4)
h_sel = int(h/4)
w_inc = int(w/16)
h_inc = int(h/16)
for i in range(13):
for j in range(13):
w_s, h_s = i*w_inc, j*h_inc
img_sel = img[w_s:w_s+w_sel, h_s:h_s+h_sel]
cor_ij, mac_ij, coords_ij, msg_ij = get_macbeth_chart(img_sel, ref_data)
if cor_ij > cor:
cor = cor_ij
mac, coords, msg = mac_ij, coords_ij, msg_ij
ii, jj = i, j
w_best, h_best = w_inc, h_inc
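The three near-identical blocks above sweep progressively finer sub-windows of the image. A hedged sketch of the same idea folded into a single loop (an illustration, not the tool's code; it omits the bookkeeping of which sub-window won, which the tool needs later to map coordinates back to the full image):

def sweep_subselections(img, ref_data, cor, get_macbeth_chart):
    """Retry get_macbeth_chart on sliding sub-windows at several scales,
    keeping the best-correlated result."""
    best = (cor, None, None, None)
    w, h = img.shape[:2]
    for d, steps in ((2, 5), (3, 9), (4, 13)):      # same scales as above
        if best[0] >= 0.75:
            break
        w_sel, h_sel = w//d, h//d
        w_inc, h_inc = w//(4*d), h//(4*d)
        for i in range(steps):
            for j in range(steps):
                w_s, h_s = i*w_inc, j*h_inc
                sel = img[w_s:w_s+w_sel, h_s:h_s+h_sel]
                res = get_macbeth_chart(sel, ref_data)
                if res[0] > best[0]:
                    best = res
    return best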

"""
Transform coordinates from subselection to original image

@ -241,7 +241,7 @@ def find_macbeth(Cam,img,mac_config=(0,0)):
if msg == success_msg:
coords_fit = coords
Cam.log += '\nMacbeth chart vertices:\n'
Cam.log += '{}'.format(2*np.round(coords_fit[0][0]), 0)
"""
if correlation is lower than 0.75 there may be a risk of macbeth chart
corners not having been located properly. It might be worth running

@ -253,8 +253,8 @@ def find_macbeth(Cam,img,mac_config=(0,0)):
if cor < 0.75:
print('Caution: Low confidence guess!')
Cam.log += 'WARNING: Low confidence guess!'
# cv2.imshow('MacBeth', mac)
# represent(mac, 'MacBeth chart')

"""
extract data from coords_fit and plot on original image

@ -269,7 +269,7 @@ def find_macbeth(Cam,img,mac_config=(0,0)):
"""
for vert in verts:
p = tuple(np.round(vert).astype(np.int32))
cv2.circle(copy, p, 10, 1, -1)
"""
draw circles at centres of squares
"""

@ -281,17 +281,17 @@ def find_macbeth(Cam,img,mac_config=(0,0)):
grey circle everywhere else.
"""
if i == 3:
cv2.circle(copy, p, 8, 0, -1)
elif i == 23:
cv2.circle(copy, p, 8, 1, -1)
else:
cv2.circle(copy, p, 8, 0.5, -1)
copy, _ = reshape(copy, 400)
represent(copy)

return(coords_fit)

def get_macbeth_chart(img, ref_data):
"""
function returns coordinates of macbeth chart vertices and square centres,
along with an error/success message for debugging purposes. Additionally,

@ -316,7 +316,7 @@ def get_macbeth_chart(img,ref_data):
"""
get reference macbeth chart data
"""
(ref, ref_w, ref_h, ref_corns) = ref_data

"""
the code will raise and catch a MacbethError in case of a problem, trying

@ -327,10 +327,10 @@ def get_macbeth_chart(img,ref_data):
obtain image, convert to grayscale and normalise
"""
src = img
src, factor = reshape(src, 200)
original=src.copy()
a = 125/np.average(src)
src_norm = cv2.convertScaleAbs(src, alpha=a, beta=0)
"""
This code checks if there are separate colour channels. In the past the
macbeth locator ran on jpgs and this makes it robust to different

@ -338,11 +338,11 @@ def get_macbeth_chart(img,ref_data):
average bayer channel so coordinates must be doubled.

This is best done in img_load.py in the get_patches method. The
coordinates and image width, height must be divided by two if the
macbeth locator has been run on a demosaicked image.
"""
if len(src_norm.shape) == 3:
src_bw = cv2.cvtColor(src_norm, cv2.COLOR_BGR2GRAY)
else:
src_bw = src_norm
original_bw = src_bw.copy()

@ -350,20 +350,20 @@ def get_macbeth_chart(img,ref_data):
obtain image edges
"""
sigma=2
src_bw = cv2.GaussianBlur(src_bw, (0, 0), sigma)
t1, t2 = 50, 100
edges = cv2.Canny(src_bw, t1, t2)
"""
dilate edges to prevent self-intersections in contours
"""
k_size = 2
kernel = np.ones((k_size, k_size))
its = 1
edges = cv2.dilate(edges, kernel, iterations=its)
"""
find Contours in image
"""
conts, _ = cv2.findContours(edges,
cv2.RETR_TREE,
cv2.CHAIN_APPROX_NONE)
if len(conts) == 0:

@ -380,11 +380,11 @@ def get_macbeth_chart(img,ref_data):
epsilon = 0.07
conts_per = []
for i in range(len(conts)):
per = cv2.arcLength(conts[i], True)
poly = cv2.approxPolyDP(conts[i],
epsilon*per, True)
if len(poly) == 4 and cv2.isContourConvex(poly):
conts_per.append((poly, per))
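Condensing the contour stage of this hunk into one hedged helper (gray is assumed to be an 8-bit greyscale image, and the blur, Canny thresholds and epsilon mirror the values above):

import cv2
import numpy as np

def find_square_candidates(gray, epsilon=0.07):
    """Edge-detect, dilate, then keep convex quadrilateral approximations of
    the contours along with their perimeters."""
    blurred = cv2.GaussianBlur(gray, (0, 0), 2)
    edges = cv2.Canny(blurred, 50, 100)
    edges = cv2.dilate(edges, np.ones((2, 2), np.uint8), iterations=1)
    conts, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    candidates = []
    for c in conts:
        per = cv2.arcLength(c, True)
        poly = cv2.approxPolyDP(c, epsilon*per, True)
        if len(poly) == 4 and cv2.isContourConvex(poly):
            candidates.append((poly, per))
    return candidates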

if len(conts_per) == 0:
raise MacbethError(

@ -399,11 +399,11 @@ def get_macbeth_chart(img,ref_data):
"""
sort contours by perimeter and get perimeters within percent of median
"""
conts_per = sorted(conts_per, key=lambda x: x[1])
med_per = conts_per[int(len(conts_per)/2)][1]
side = med_per/4
perc = 0.1
med_low, med_high = med_per*(1-perc), med_per*(1+perc)
squares = []
for i in conts_per:
if med_low <= i[1] and med_high >= i[1]:

@ -412,7 +412,7 @@ def get_macbeth_chart(img,ref_data):
"""
obtain coordinates of normalised macbeth and squares
"""
square_verts, mac_norm = get_square_verts(0.06)
"""
for each square guess, find 24 possible macbeth chart centres
"""

@ -432,11 +432,11 @@ def get_macbeth_chart(img,ref_data):
"""
reorder vertices to prevent 'hourglass shape'
"""
square = sorted(square, key=lambda x: x[0])
square_1 = sorted(square[:2], key=lambda x: x[1])
square_2 = sorted(square[2:], key=lambda x: -x[1])
square = np.array(np.concatenate((square_1, square_2)), np.float32)
square = np.reshape(square, (4, 2)).astype(np.float32)
squares[i] = square
"""
find 24 possible macbeth chart centres by transforming normalised

@ -444,8 +444,8 @@ def get_macbeth_chart(img,ref_data):
"""
for j in range(len(square_verts)):
verts = square_verts[j]
p_mat = cv2.getPerspectiveTransform(verts, square)
mac_guess = cv2.perspectiveTransform(mac_norm, p_mat)
mac_guess = np.round(mac_guess).astype(np.int32)
"""
keep only if candidate macbeth is within image border

@ -465,7 +465,7 @@ def get_macbeth_chart(img,ref_data):
if in_border:
mac_mid = np.mean(mac_guess,
axis=1)
mac_mids.append([mac_mid, (i, j)])

if len(mac_mids) == 0:
raise MacbethError(

@ -497,14 +497,14 @@ def get_macbeth_chart(img,ref_data):
special case of only one valid centre found (probably not needed)
"""
clus_list = []
clus_list.append([mac_mids, len(mac_mids)])

else:
clustering.fit(mac_mids_list)
# try:
# clustering.fit(mac_mids_list)
# except RuntimeWarning as error:
# return(0, None, None, error)
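The clustering object used here is configured outside this hunk. As an assumption-labelled sketch, scikit-learn's AgglomerativeClustering with a distance threshold can group candidate chart centres that agree with each other; the threshold, linkage and data below are illustrative guesses, not the tool's values.

import numpy as np
from sklearn.cluster import AgglomerativeClustering

# hypothetical candidate centres: two tight groups plus one outlier
mac_mids_list = np.array([[100, 80], [102, 79], [101, 81],
                          [250, 200], [251, 201], [400, 10]])

# cluster without fixing the number of clusters; the distance threshold
# is an assumption for this example only
clustering = AgglomerativeClustering(n_clusters=None, distance_threshold=20,
                                     linkage='single')
clustering.fit(mac_mids_list)
print(clustering.n_clusters_)    # 3 groups for this made-up data
print(clustering.labels_)        # which group each candidate centre fell into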

"""
create list of all clusters

@ -512,19 +512,19 @@ def get_macbeth_chart(img,ref_data):
clus_list = []
if clustering.n_clusters_ >1:
for i in range(clustering.labels_.max()+1):
indices = [j for j, x in enumerate(clustering.labels_) if x == i]
clus = []
for index in indices:
clus.append(mac_mids[index])
clus_list.append([clus, len(clus)])
clus_list.sort(key=lambda x: -x[1])

elif clustering.n_clusters_ == 1:
"""
special case of only one cluster found
"""
# print('only 1 cluster')
clus_list.append([mac_mids, len(mac_mids)])
else:
raise MacbethError(
'\nWARNING: No macebth chart found!'

@ -542,19 +542,19 @@ def get_macbeth_chart(img,ref_data):
if clus_list[i][1] < clus_len_max * clus_tol:
clus_list = clus_list[:i]
break
cent = np.mean(clus_list[i][0], axis=0)[0]
clus_list[i].append(cent)

"""
represent most popular cluster centroids
"""
# copy = original_bw.copy()
# copy = cv2.cvtColor(copy, cv2.COLOR_GRAY2RGB)
# copy = cv2.resize(copy, None, fx=2, fy=2)
# for clus in clus_list:
# centroid = tuple(2*np.round(clus[2]).astype(np.int32))
# cv2.circle(copy, centroid, 7, (255, 0, 0), -1)
# cv2.circle(copy, centroid, 2, (0, 0, 255), -1)
# represent(copy)

"""

@ -578,7 +578,7 @@ def get_macbeth_chart(img,ref_data):
ref_cents = []
i_list = [p[1][0] for p in clus]
for point in clus:
i, j = point[1]
"""
remove any square that voted for two different points within
the same cluster. This causes the same point in the image to be

@ -591,7 +591,7 @@ def get_macbeth_chart(img,ref_data):
"""
if i_list.count(i) == 1:
square = squares_raw[i]
sq_cent = np.mean(square, axis=0)
ref_cent = reference[j]
sq_cents.append(sq_cent)
ref_cents.append(ref_cent)

@ -614,7 +614,7 @@ def get_macbeth_chart(img,ref_data):
"""
find best fit transform from normalised centres to image
"""
h_mat, mask = cv2.findHomography(ref_cents, sq_cents)
if 'None' in str(type(h_mat)):
raise MacbethError(
'\nERROR\n'

@ -623,8 +623,8 @@ def get_macbeth_chart(img,ref_data):
"""
transform normalised corners and centres into image space
"""
mac_fit = cv2.perspectiveTransform(mac_norm, h_mat)
mac_cen_fit = cv2.perspectiveTransform(np.array([reference]), h_mat)
"""
transform located corners into reference space
"""

@ -633,18 +633,18 @@ def get_macbeth_chart(img,ref_data):
np.array([ref_corns])
)
map_to_ref = cv2.warpPerspective(
original_bw, ref_mat,
(ref_w, ref_h)
)
"""
normalise brightness
"""
a = 125/np.average(map_to_ref)
map_to_ref = cv2.convertScaleAbs(map_to_ref, alpha=a, beta=0)
"""
find correlation with bw reference macbeth
"""
cor = correlate(map_to_ref, ref)
"""
keep only if best correlation
"""
|
||||||
|
@ -660,21 +660,21 @@ def get_macbeth_chart(img,ref_data):
upside-down
"""
mac_fit_inv = np.array(
([[mac_fit[0][2],mac_fit[0][3],
([[mac_fit[0][2], mac_fit[0][3],
mac_fit[0][0],mac_fit[0][1]]])
mac_fit[0][0], mac_fit[0][1]]])
)
mac_cen_fit_inv = np.flip(mac_cen_fit,axis=1)
mac_cen_fit_inv = np.flip(mac_cen_fit, axis=1)
ref_mat = cv2.getPerspectiveTransform(
mac_fit_inv,
np.array([ref_corns])
)
map_to_ref = cv2.warpPerspective(
original_bw,ref_mat,
original_bw, ref_mat,
(ref_w,ref_h)
(ref_w, ref_h)
)
a = 125/np.average(map_to_ref)
map_to_ref = cv2.convertScaleAbs(map_to_ref,alpha=a,beta=0)
map_to_ref = cv2.convertScaleAbs(map_to_ref, alpha=a, beta=0)
cor = correlate(map_to_ref,ref)
cor = correlate(map_to_ref, ref)
if cor > max_cor:
max_cor = cor
best_map = map_to_ref
@ -704,45 +704,45 @@ def get_macbeth_chart(img,ref_data):
draw macbeth corners and centres on image
"""
copy = original.copy()
copy = cv2.resize(original,None,fx=2,fy=2)
copy = cv2.resize(original, None, fx=2, fy=2)
# print('correlation = {}'.format(round(max_cor,2)))
# print('correlation = {}'.format(round(max_cor, 2)))
for point in best_fit[0]:
point = np.array(point,np.float32)
point = np.array(point, np.float32)
point = tuple(2*np.round(point).astype(np.int32))
cv2.circle(copy,point,4,(255,0,0),-1)
cv2.circle(copy, point, 4, (255, 0, 0), -1)
for point in best_cen_fit[0]:
point = np.array(point,np.float32)
point = np.array(point, np.float32)
point = tuple(2*np.round(point).astype(np.int32))
cv2.circle(copy,point,4,(0,0,255),-1)
cv2.circle(copy, point, 4, (0, 0, 255), -1)
copy = copy.copy()
cv2.circle(copy,point,4,(0,0,255),-1)
cv2.circle(copy, point, 4, (0, 0, 255), -1)

"""
represent coloured macbeth in reference space
"""
best_map_col = cv2.warpPerspective(
original,best_ref_mat,(ref_w,ref_h)
original, best_ref_mat, (ref_w, ref_h)
)
best_map_col = cv2.resize(
best_map_col,None,fx=4,fy=4
best_map_col, None, fx=4, fy=4
)
a = 125/np.average(best_map_col)
best_map_col_norm = cv2.convertScaleAbs(
best_map_col,alpha=a,beta=0
best_map_col, alpha=a, beta=0
)
# cv2.imshow('Macbeth',best_map_col)
# cv2.imshow('Macbeth', best_map_col)
# represent(copy)


"""
rescale coordinates to original image size
"""
fit_coords = (best_fit/factor,best_cen_fit/factor)
fit_coords = (best_fit/factor, best_cen_fit/factor)

return(max_cor,best_map_col_norm,fit_coords,success_msg)
return(max_cor, best_map_col_norm, fit_coords, success_msg)

"""
catch macbeth errors and continue with code
"""
except MacbethError as error:
return(0,None,None,error)
@ -12,7 +12,7 @@ Find noise standard deviation and fit to model:

noise std = a + b*sqrt(pixel mean)
"""
def noise(Cam,Img,plot):
def noise(Cam, Img, plot):
Cam.log += '\nProcessing image: {}'.format(Img.name)
stds = []
means = []
@ -36,14 +36,14 @@ def noise(Cam,Img,plot):
"""
stds = np.array(stds)
means = np.array(means)
means = np.clip(np.array(means),0,None)
means = np.clip(np.array(means), 0, None)
sq_means = np.sqrt(means)


"""
least squares fit model
"""
fit = np.polyfit(sq_means,stds,1)
fit = np.polyfit(sq_means, stds, 1)
Cam.log += '\nBlack level = {}'.format(Img.blacklevel_16)
Cam.log += '\nNoise profile: offset = {}'.format(int(fit[1]))
Cam.log += ' slope = {:.3f}'.format(fit[0])
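As context: the noise model above, std = a + b*sqrt(pixel mean), is linear in sqrt(mean), so a degree-1 least-squares fit recovers the slope and offset directly. A small self-contained sketch with made-up patch statistics (the values are synthetic, not measured data):

import numpy as np

# Synthetic patch statistics: per-patch mean pixel values and noise std devs.
means = np.array([100.0, 400.0, 900.0, 1600.0, 2500.0])
stds = np.array([11.0, 21.0, 30.5, 41.0, 50.5])

# std = offset + slope * sqrt(mean) is linear in sqrt(mean), so a
# degree-1 polyfit returns (slope, offset), highest power first.
sq_means = np.sqrt(np.clip(means, 0, None))
slope, offset = np.polyfit(sq_means, stds, 1)
print('offset = {}  slope = {:.3f}'.format(int(offset), slope))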
@ -59,8 +59,8 @@ def noise(Cam,Img,plot):
fit_score_norm = fit_score - fit_std
anom_ind = np.where(fit_score_norm > 1)
fit_score_norm.sort()
sq_means_clean = np.delete(sq_means,anom_ind)
sq_means_clean = np.delete(sq_means, anom_ind)
stds_clean = np.delete(stds,anom_ind)
stds_clean = np.delete(stds, anom_ind)
removed = len(stds) - len(stds_clean)
if removed != 0:
Cam.log += '\nIdentified and removed {} anomalies.'.format(removed)
@ -68,7 +68,7 @@ def noise(Cam,Img,plot):
"""
recalculate fit with outliers removed
"""
fit = np.polyfit(sq_means_clean,stds_clean,1)
fit = np.polyfit(sq_means_clean, stds_clean, 1)
Cam.log += '\nNoise profile: offset = {}'.format(int(fit[1]))
Cam.log += ' slope = {:.3f}'.format(fit[0])
@ -81,7 +81,7 @@ def noise(Cam,Img,plot):
corrected = 1
ones = np.ones(len(means))
y_data = stds/sq_means
fit2 = np.polyfit(ones,y_data,0)
fit2 = np.polyfit(ones, y_data, 0)
Cam.log += '\nOffset below zero. Fit recalculated with zero offset'
Cam.log += '\nNoise profile: offset = 0'
Cam.log += ' slope = {:.3f}'.format(fit2[0])
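When the fitted offset comes out negative, the hunk above refits with the offset forced to zero: a degree-0 polyfit of stds/sqrt(means) is simply the mean of that ratio, which then acts as the slope of a line through the origin. A sketch with made-up numbers:

import numpy as np

means = np.array([100.0, 400.0, 900.0, 1600.0, 2500.0])
stds = np.array([10.2, 20.1, 29.8, 40.3, 50.1])
sq_means = np.sqrt(means)

# A degree-0 polyfit fits a single constant in the least-squares sense,
# i.e. the mean of the ratio; used as the zero-intercept slope.
slope = np.polyfit(np.ones(len(means)), stds / sq_means, 0)[0]
assert np.isclose(slope, np.mean(stds / sq_means))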
@ -94,13 +94,13 @@ def noise(Cam,Img,plot):
if plot:
x = np.arange(sq_means.max()//0.88)
fit_plot = x*fit[0] + fit[1]
plt.scatter(sq_means,stds,label='data',color='blue')
plt.scatter(sq_means, stds, label='data', color='blue')
plt.scatter(sq_means[anom_ind],stds[anom_ind],color='orange',label='anomalies')
plt.scatter(sq_means[anom_ind], stds[anom_ind], color='orange', label='anomalies')
plt.plot(x,fit_plot,label='fit',color='red',ls=':')
plt.plot(x, fit_plot, label='fit', color='red', ls=':')
if fit[1] < 0:
fit_plot_2 = x*fit2[0]
plt.plot(x,fit_plot_2,label='fit 0 intercept',color='green',ls='--')
plt.plot(x, fit_plot_2, label='fit 0 intercept', color='green', ls='--')
plt.plot(0,0)
plt.plot(0, 0)
plt.title('Noise Plot\nImg: {}'.format(Img.str))
plt.legend(loc = 'upper left')
plt.xlabel('Sqrt Pixel Value')
@ -116,7 +116,7 @@ def noise(Cam,Img,plot):
"""
Cam.log += '\n'
if corrected:
fit = [fit2[0],0]
fit = [fit2[0], 0]
return fit

else:
@ -11,59 +11,59 @@ scale = 2
"""
constructs normalised macbeth chart corners for ransac algorithm
"""
def get_square_verts(c_err = 0.05,scale = scale):
def get_square_verts(c_err = 0.05, scale = scale):
"""
define macbeth chart corners
"""
b_bord_x,b_bord_y = scale*8.5,scale*13
b_bord_x, b_bord_y = scale*8.5, scale*13
s_bord = 6*scale
side = 41*scale
x_max = side*6 + 5*s_bord + 2*b_bord_x
y_max = side*4 + 3*s_bord + 2*b_bord_y
c1 = (0,0)
c1 = (0, 0)
c2 = (0,y_max)
c2 = (0, y_max)
c3 = (x_max,y_max)
c3 = (x_max, y_max)
c4 = (x_max,0)
c4 = (x_max, 0)
mac_norm = np.array((c1,c2,c3,c4),np.float32)
mac_norm = np.array((c1, c2, c3, c4), np.float32)
mac_norm = np.array([ mac_norm ])

square_verts = []
square_0 = np.array(((0,0),(0,side),
square_0 = np.array(((0, 0), (0, side),
(side,side),(side,0)),np.float32)
(side, side), (side, 0)), np.float32)
offset_0 = np.array((b_bord_x,b_bord_y),np.float32)
offset_0 = np.array((b_bord_x, b_bord_y), np.float32)
c_off = side * c_err
offset_cont = np.array(((c_off,c_off),(c_off,-c_off),
offset_cont = np.array(((c_off, c_off), (c_off, -c_off),
(-c_off,-c_off),(-c_off,c_off)),np.float32)
(-c_off, -c_off), (-c_off, c_off)), np.float32)
square_0 += offset_0
square_0 += offset_cont
"""
define macbeth square corners
"""
for i in range(6):
shift_i = np.array(((i*side,0),(i*side,0),
shift_i = np.array(((i*side, 0), (i*side, 0),
(i*side,0),(i*side,0)),np.float32)
(i*side, 0), (i*side, 0)), np.float32)
shift_bord =np.array(((i*s_bord,0),(i*s_bord,0),
shift_bord =np.array(((i*s_bord, 0), (i*s_bord, 0),
(i*s_bord,0),(i*s_bord,0)),np.float32)
(i*s_bord, 0), (i*s_bord, 0)), np.float32)
square_i = square_0 + shift_i + shift_bord
for j in range(4):
shift_j = np.array(((0,j*side),(0,j*side),
shift_j = np.array(((0, j*side), (0, j*side),
(0,j*side),(0,j*side)),np.float32)
(0, j*side), (0, j*side)), np.float32)
shift_bord = np.array(((0,j*s_bord),
shift_bord = np.array(((0, j*s_bord),
(0,j*s_bord),(0,j*s_bord),
(0, j*s_bord), (0, j*s_bord),
(0,j*s_bord)),np.float32)
(0, j*s_bord)), np.float32)
square_j = square_i + shift_j + shift_bord
square_verts.append(square_j)
# print('square_verts')
# print(square_verts)
return np.array(square_verts,np.float32),mac_norm
return np.array(square_verts, np.float32), mac_norm

def get_square_centres(c_err = 0.05,scale=scale):
def get_square_centres(c_err = 0.05, scale=scale):
"""
define macbeth square centres
"""
verts,mac_norm = get_square_verts(c_err,scale=scale)
verts, mac_norm = get_square_verts(c_err, scale=scale)

centres = np.mean(verts,axis = 1)
centres = np.mean(verts, axis = 1)
# print('centres')
# print(centres)
return np.array(centres,np.float32)
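To illustrate the shape convention these helpers appear to rely on: get_square_verts builds one 4-corner quad per patch of the 6x4 chart, and get_square_centres reduces each quad to its mean. A stand-in sketch (the random array is only a placeholder for the real vertex array):

import numpy as np

# Hypothetical stand-in for the (24, 4, 2) array of square corner vertices
# produced by get_square_verts(); the values here are arbitrary.
square_verts = np.random.rand(24, 4, 2).astype(np.float32)

# Each patch centre is the mean of its four corners, matching
# np.mean(verts, axis = 1) in the hunk above.
centres = np.mean(square_verts, axis=1)
assert centres.shape == (24, 2)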
@ -26,7 +26,7 @@ readability in the main files.
obtain config values, unless it doesnt exist, in which case pick default
Furthermore, it can check if the input is the correct type
"""
def get_config(dictt,key,default,ttype):
def get_config(dictt, key, default, ttype):
try:
val = dictt[key]
if ttype == 'string':
@ -57,16 +57,16 @@ def parse_input():
raise ArgError('\n\nERROR! Enter value for each arguent passed.')
params = arguments [0::2]
vals = arguments [1::2]
args_dict = dict(zip(params,vals))
args_dict = dict(zip(params, vals))
json_output = get_config(args_dict,'-o',None,'string')
json_output = get_config(args_dict, '-o', None, 'string')
directory = get_config(args_dict,'-i',None,'string')
directory = get_config(args_dict, '-i', None, 'string')
config = get_config(args_dict,'-c',None,'string')
config = get_config(args_dict, '-c', None, 'string')
log_path = get_config(args_dict,'-l',None,'string')
log_path = get_config(args_dict, '-l', None, 'string')
if directory == None:
raise ArgError('\n\nERROR! No input directory given.')
if json_output == None:
raise ArgError('\n\nERROR! No output json given.')
return json_output,directory,config,log_path
return json_output, directory, config, log_path
"""
custom arg and macbeth error class
"""
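To illustrate the pattern parse_input relies on, here is a simplified sketch (not the tool's exact get_config, whose type-checking branches fall outside this hunk): pair up flags and values from argv, then fall back to a default when a flag is absent:

import sys

def get_config_sketch(dictt, key, default, ttype):
    # Return dictt[key] if present (coercing strings), else the default.
    try:
        val = dictt[key]
        if ttype == 'string':
            val = str(val)
        return val
    except KeyError:
        return default

arguments = sys.argv[1:]
args_dict = dict(zip(arguments[0::2], arguments[1::2]))
json_output = get_config_sketch(args_dict, '-o', None, 'string')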
@ -78,10 +78,10 @@ class MacbethError(Exception):
"""
correlation function to quantify match
"""
def correlate(im1,im2):
def correlate(im1, im2):
f1 = im1.flatten()
f2 = im2.flatten()
cor = np.corrcoef(f1,f2)
cor = np.corrcoef(f1, f2)
return cor[0][1]

"""
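A short usage sketch of this correlation measure on two made-up patches; np.corrcoef returns the 2x2 correlation matrix, and the off-diagonal entry is the Pearson coefficient used as the match score:

import numpy as np

a = np.array([[10, 20], [30, 40]], dtype=np.float32)
b = np.array([[12, 19], [33, 38]], dtype=np.float32)

# Pearson correlation of the flattened images, as in correlate() above.
cor = np.corrcoef(a.flatten(), b.flatten())[0][1]
print('correlation = {:.3f}'.format(cor))  # near 1.0 for similar patches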
@ -97,13 +97,13 @@ def get_photos(directory='photos'):
"""
display image for debugging... read at your own risk...
"""
def represent(img,name='image'):
def represent(img, name='image'):
# if type(img) == tuple or type(img) == list:
# for i in range(len(img)):
# name = 'image {}'.format(i)
# cv2.imshow(name,img[i])
# cv2.imshow(name, img[i])
# else:
# cv2.imshow(name,img)
# cv2.imshow(name, img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# return 0
@ -112,11 +112,11 @@ def represent(img,name='image'):
with their mouse to close the window.... therefore matplotlib is used....
(thanks a lot opencv)
"""
grid = plt.GridSpec(22,1)
grid = plt.GridSpec(22, 1)
plt.subplot(grid[:19,0])
plt.subplot(grid[:19, 0])
plt.imshow(img,cmap='gray')
plt.imshow(img, cmap='gray')
plt.axis('off')
plt.subplot(grid[21,0])
plt.subplot(grid[21, 0])
plt.title('press \'q\' to continue')
plt.axis('off')
plt.show()
@ -124,7 +124,7 @@ def represent(img,name='image'):
# f = plt.figure()
# ax = f.add_subplot(211)
# ax2 = f.add_subplot(122)
# ax.imshow(img,cmap='gray')
# ax.imshow(img, cmap='gray')
# ax.axis('off')
# ax2.set_figheight(2)
# ax2.title('press \'q\' to continue')
@ -136,6 +136,6 @@ def represent(img,name='image'):
reshape image to fixed width without distorting
returns image and scale factor
"""
def reshape(img,width):
def reshape(img, width):
factor = width/img.shape[0]
return cv2.resize(img,None,fx=factor,fy=factor),factor
return cv2.resize(img, None, fx=factor, fy=factor), factor
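A brief usage sketch: because fx and fy use the same factor, the aspect ratio is preserved, and the returned factor lets callers map coordinates found in the resized image back to the original. The image and coordinates below are hypothetical, not from the tool:

import cv2
import numpy as np

img = np.zeros((1000, 1500), dtype=np.uint8)  # placeholder greyscale image
factor = 480 / img.shape[0]
small = cv2.resize(img, None, fx=factor, fy=factor)

# A point located in the resized image maps back by dividing by the factor.
x_small, y_small = 120, 200
x_orig, y_orig = x_small / factor, y_small / factor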