Commit 747ec2bf authored by Simon Grosche's avatar Simon Grosche
Browse files

initial code upload

parent f9252fae
__pycache__/
*.tiff
*.tif
*.png
.idea/
[CODE WILL BE UPLOADED AFTER PUBLICATION]
# Code for PADIS
This code is the reference implementation of probabilistic approach to dynamic image sampling (PADIS). PADIS is a dynamic sampling algorithm for images.
# Last tested with:
* Python 3.5
* Numpy 1.xx
Simon Grosche, Michael Koller, Jürgen Seiler, and André Kaup, "Dynamic Image Sampling Using a Novel Variance Based Probability Mass Function", IEEE Transactions on Computational Imaging, 2020. DOI: 10.1109/TCI.2020.3031077
Corresponding author: simon.grosche@fau.de
# Last tested in October 2020 using:
* python 3.6.9
* numpy 1.14.2
* scipy 1.0.0
* numba 0.38.1
* imageio
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
% Copyright (c) 2020 by %
% Chair of Multimedia Communications and Signal Processing %
% Friedrich-Alexander-Universität Erlangen-Nürnberg (FAU) %
% - all rights reserved - %
% %
% YOU ARE USING THIS PROGRAM AT YOUR OWN RISK! THE AUTHOR %
% IS NOT RESPONSIBLE FOR ANY DAMAGE OR DATA-LOSS CAUSED BY THE %
% USE OF THIS PROGRAM. %
% %
% %
% If you have any questions please contact: %
% %
% Simon Grosche, M.Sc. %
% Multimedia Communications and Signal Processing %
% University of Erlangen-Nuremberg %
% Cauerstr. 7 %
% 91058 Erlangen, Germany %
% %
% email: simon.grosche @ fau.de %
This Python program can generate different incremental patterns.
It implements the dynamic PADIS pattern presented in:
Simon Grosche, Michael Koller, Jürgen Seiler, and André Kaup,
"Dynamic Image Sampling Using a Novel Variance Based Probability Mass Function",
IEEE Transactions on Computational Imaging, 2020. DOI: 10.1109/TCI.2020.3031077
"""
import numpy as np
from scipy import misc
import padis_helpers
import sampling_helpers
import imageio
import os
if __name__ == "__main__":
    import sys  # NOTE(review): unused in this script

    # Select which kind of incremental sampling pattern to generate:
    # mask_case_if = 1 # gaussian-density optimized incremental pattern
    # mask_case_if = 2 # Random incremental pattern
    mask_case_if = 3 # dynamic incremental pattern with PADIS (once need to run 'mask_case_if = 1' before for the initialization!)

    if mask_case_if==1:
        """
        Create non-periodic Gaussian-density optimized incremental pattern as in paper:
        Simon Grosche, Jürgen Seiler, and André Kaup, "Design Techniques for Incremental Non-Regular Image Sampling Patterns"
        in Proc. IEEE International Conference on Imaging Systems and Techniques (IST), Krakow, 2018.
        """
        # densities (1% ... 99%) for which a mask file is written out
        densitylist = np.arange(0.01,0.99+1e-8,0.01)
        # densitylist = [0.01,0.02,0.03] # just up to the values needed for initialization of PADIS
        slist = [(576,1024),]  # mask sizes as (height, width)
        tau=7
        for s_id,s in enumerate(slist):
            dy, dx = s
            for run_id, run in enumerate([1,]):
                # One ordered list of sample positions is generated once; the
                # masks for all densities are prefixes of this list, which is
                # what makes the pattern "incremental".
                xy_list = sampling_helpers.create_iterative_gaussian_density_mask_nonperiodic(dy, dx, np.max(densitylist), 4, tau)
                for den in densitylist:
                    mask_gauss = sampling_helpers.create_mask_from_list(dy,dx, den, xy_list)
                    sampling_helpers.validate_random_mask(mask_gauss, den)
                    params_text = '_%dx%d_rho%0.4f_run%d'%(dy, dx, den, run)
                    tau_text = '_sigm%d' % (tau)
                    sampling_helpers.create_and_save_largercopymask(mask_gauss*255, 'masks/gauss-non-periodic_mask'+tau_text+params_text+'.png', 1)
    elif mask_case_if==2:
        """
        Create fully random, incremental pattern
        """
        densitylist = np.arange(0.01,0.99+1e-8,0.01)
        slist = [(576,1024),]
        for s_id,s in enumerate(slist):
            dy, dx = s
            for run_id, run in enumerate([1,]):
                for den in densitylist:
                    mask = sampling_helpers.create_random_mask((dy,dx) , den)
                    sampling_helpers.validate_random_mask(mask, den)
                    params_text = '_%dx%d_rho%0.4f_run%d'%(dy,dx, den, run)
                    sampling_helpers.create_and_save_largercopymask(mask*255, 'masks/rand_mask'+params_text+'.png', 1)
    elif mask_case_if == 3:
        """
        Create dynamic pattern using PADIS algorithm newly presented in
        Simon Grosche, Michael Koller, Jürgen Seiler, and André Kaup, "Dynamic Image Sampling Using a Novel Variance Based Probability Mass Function",
        IEEE Transactions on Computational Imaging, 2020.
        """
        # define location of the images that should be sampled.
        img_fn_list = ['wikiimg%03d' % (i) for i in [1]]
        img_filetype = '.tif'
        img_folder = 'testimg/SEM_MUSE_images/'
        img_case = 'wikisem'
        densitylist = np.arange(0.01, 0.99+1e-8, 0.01)
        max_add_curr = 0.01 # This is the parameter N_+ in the paper.
        sig_min_curr = 1e-10 # not used in the paper -> set to zero.
        sig_max_factor_curr = 1.2 # This is the parameter \gamma in the paper.
        sample_params = padis_helpers.params(max_add=max_add_curr, ld_dens=None, des_dens=None, var_border=6, tau=7)
        # sig_min_curr / sig_max_factor_curr are attached dynamically; they are
        # read later by adaptive_sampling_mask_simplified5.
        sample_params.sig_min_curr = sig_min_curr
        sample_params.sig_max_factor_curr = sig_max_factor_curr
        for img_fn in img_fn_list:
            if img_case == 'wikisem':
                # crop lower inset from the wikisem testset
                img = imageio.imread(img_folder+img_fn+img_filetype) / 255
                assert img.ndim == 2
                dy, dx = np.shape(img)
                # keep the upper 75% of the rows and make both dimensions divisible by 4
                img = img[:int(0.75 * dy)-int(0.75 * dy) % 4, :dx // 4 * 4]
                del dy
                del dx
            else:
                img = imageio.imread(img_folder+img_fn+img_filetype, as_gray=True) / 255
            dy, dx = np.shape(img)
            for den in densitylist:
                print('next density: ', den)
                # for the first three runs until 3%, take the optimized patterns from 'mask_case_if = 1' as initialization
                if den in [0.01, 0.02, 0.03]:
                    # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2;
                    # the README states this was last tested with scipy 1.0.0.
                    mask = misc.imread('masks/gauss-non-periodic_mask_sigm7_%dx%d_rho%0.4f_run1.pngcopiedperiodically.png' % (dy, dx, den)) / 255
                    mask_initial = mask
                else:
                    sample_params.des_dens = den
                    # grow the previous mask up to the next density with PADIS
                    mask = padis_helpers.adaptive_sampling_mask_simplified5(img, sample_params, 1 * mask_initial)
                    mask_initial = 1 * mask
                params_text = '_%dx%d_rho%0.4f' % (dy, dx, den)
                case_text = 'adaptive5_startIST0.03'+'_maxadd%0.4f_sigmin%0.3f_sigmaxfactor%0.3f' % (
                    max_add_curr, sig_min_curr, sig_max_factor_curr)
                # create the output directory if it does not exist yet
                try:
                    if not os.path.exists('masks/'+case_text+'/'+img_fn+'/'):
                        os.makedirs('masks/'+case_text+'/'+img_fn+'/')
                except:
                    pass
                sampling_helpers.create_and_save_largercopymask(mask * 255, 'masks/'+case_text+'/'+img_fn+'/'+case_text+params_text+'_'+img_fn+'.png', 1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
% Copyright (c) 2020 by %
% Chair of Multimedia Communications and Signal Processing %
% Friedrich-Alexander-Universität Erlangen-Nürnberg (FAU) %
% - all rights reserved - %
% %
% YOU ARE USING THIS PROGRAM AT YOUR OWN RISK! THE AUTHOR %
% IS NOT RESPONSIBLE FOR ANY DAMAGE OR DATA-LOSS CAUSED BY THE %
% USE OF THIS PROGRAM. %
% %
% %
% If you have any questions please contact: %
% %
% Simon Grosche, M.Sc. %
% Multimedia Communications and Signal Processing %
% University of Erlangen-Nuremberg %
% Cauerstr. 7 %
% 91058 Erlangen, Germany %
% %
% email: simon.grosche @ fau.de %
This helper file is called from generate_patterns.py
"""
import numpy as np
import numba
import time
#parameters for the adaptive sampling
class params():
    """
    Parameter container for the adaptive (PADIS) sampling.

    max_add    -- fraction of pixels added per iteration (N_+ in the paper)
    ld_dens    -- presumably the density of the low-density initial mask; not
                  read by the helpers in this file -- TODO confirm
    des_dens   -- desired final sampling density
    var_border -- half-width of the window used for the local variance map
    tau        -- exponent of the modified Gaussian PMF (default 7)
    w_max      -- default 32; not read by the helpers in this file
    k_A        -- default 2; not read by the helpers in this file
    """

    def __init__(self, max_add, ld_dens, des_dens, var_border, tau=7, w_max=32, k_A=2):
        # Store every constructor argument as a plain attribute.
        for attr, value in (('max_add', max_add),
                            ('ld_dens', ld_dens),
                            ('des_dens', des_dens),
                            ('var_border', var_border),
                            ('tau', tau),
                            ('w_max', w_max),
                            ('k_A', k_A)):
            setattr(self, attr, value)
@numba.jit(nopython=True)
def get_var_map_weighted(mask,img,border):
    """
    calculate a variance map from a given image, mask and border width.
    A Gaussian weighting function is applied to the samples.

    For every pixel, all sampled positions (mask == 1) inside a square window
    of half-width `border` are collected and a Gaussian-weighted variance of
    their image values is computed.

    Parameters
    ----------
    mask : 2D array, 1 where a pixel has been sampled, 0 otherwise
    img : 2D array of (masked) image values, same shape as mask
    border : int, half-width of the local window

    Returns
    -------
    2D array of local weighted variances; 0 where a window holds < 2 samples.
    """
    sy,sx=mask.shape
    out=np.zeros(mask.shape)
    for i_y in range(sy):
        # clip the window at the top image edge
        by0=border
        if border>i_y:
            by0=i_y
        for i_x in range(sx):
            # clip the window at the left image edge
            bx0=border
            if border>i_x:
                bx0=i_x
            # local window; slicing clips automatically at the bottom/right edges
            m_arr=mask[i_y-by0:i_y+border+1,i_x-bx0:i_x+border+1]
            arr=img[i_y-by0:i_y+border+1,i_x-bx0:i_x+border+1]
            Y,X=np.where(m_arr==1)
            N=Y.size
            if N>1:
                vals=np.zeros(N)
                weights=np.zeros(N)
                for i,y in enumerate(Y):
                    x=X[i]
                    vals[i]=arr[y,x]
                    # BUGFIX: the centre pixel sits at local coordinates
                    # (by0, bx0) of the clipped window, not (border, border).
                    # The original used (border, border), which produced wrong
                    # weights whenever the window was clipped at the top or
                    # left image edge.
                    weights[i]=np.exp(-((x-bx0)**2+(y-by0)**2)/(border**2))
                mu=np.mean(vals)
                # normalise so the largest weight is 1
                weights=weights/np.max(weights)
                out[i_y,i_x]=np.sum(weights*(vals-mu)**2)/N
    return out
@numba.jit(nopython=True)
def get_gauss(std,tau=7):
    """
    returns a 2D modified Gauss function with a given sigma (std) and tau (tau)

    The value at integer offset (d1, d2) from the centre is
    (1 - exp(-(d1**2 + d2**2) / std**2))**tau.  Only one quadrant is built;
    it is grown ring by ring until the next ring's value saturates at exactly
    1.0 in floating point, so the kernel is as small as possible while still
    covering the whole non-trivial region.  The quadrant is then mirrored to
    the full symmetric kernel (the centre value is 0).
    """
    # start with the 1x1 quadrant (centre only, value 0) and grow outwards
    gauss=np.zeros((1,1))
    border=0
    tmp=(1-np.exp(-((border+1)**2)/std**2))**tau
    while tmp!=1:  # stop once the next ring would be identically 1.0
        border+=1
        arr=np.zeros((border+1,border+1))
        arr[0:border,0:border]=gauss
        #set bottom left and top right
        arr[0,border]=tmp
        arr[border,0]=tmp
        #fill the rest of the new outer ring (row/column index == border)
        d1=border
        d2_arr=np.arange(1,border+1)
        for d2 in d2_arr:
            val=(1-np.exp(-(d1**2+d2**2)/std**2))**tau
            arr[d1,d2]=val
            arr[d2,d1]=val
        #update 'gauss'
        gauss=np.copy(arr)
        tmp=(1-np.exp(-((border+1)**2)/std**2))**tau
    #mirror the quadrant and concatenate to the full (2*border+1)^2 kernel
    gauss_flip=gauss[:,::-1]
    gauss=np.concatenate((gauss_flip[:,:-1],gauss),axis=1)
    gauss_flip=gauss[::-1]
    gauss=np.concatenate((gauss_flip[:-1,:],gauss),axis=0)
    return gauss
def draw_from_pdf(pdf, n):
    """
    Draw n distinct sample positions from a given 2D discrete pdf.

    Returns an (n, 2) array of (row, col) indices.  replace=False guarantees
    that no position is drawn twice (the original comment stated the opposite).
    """
    shape = pdf.shape
    flat = pdf.ravel()
    probs = flat / np.sum(flat)
    candidates = np.arange(len(probs))
    picks = np.random.choice(candidates, n, replace=False, p=probs)
    rows, cols = np.unravel_index(picks, shape)
    return np.column_stack((rows, cols))
@numba.jit(nopython=True)
def get_stdmap_for_v5(vmap,lim,stdmin,stdmax):
    """
    calculate the local standard deviation (stdmap) from the variance map,
    lambda (lim) sigma_min (stdmin) and sigma_max (stdmax)

    NOTE(review): `vm` is a view into `vmap` (ravel of a contiguous array),
    so the zero entries of `vmap` are overwritten in place with the smallest
    positive value -- the caller's array is modified.
    """
    vm=vmap.ravel()
    # replace zeros by the smallest positive variance so log10 is defined
    vmin=np.min(vm[vm>0])
    vm[vm==0]=vmin
    log_vmap=-np.log10(vm)
    # histogram of the negative log-variances, roughly 100 pixels per bin
    v,b=np.histogram(log_vmap,int(vmap.size/100))
    # v=v[::-1]
    # b=b[::-1]
    cs=np.cumsum(v)
    cs=cs/cs[-1]
    #ind: value where cs reaches lim
    ind=np.argmax(cs>=lim)
    # val: centre of the bin where the cumulative histogram crosses lim
    val=(b[ind]+b[ind+1])/2
    #mu=np.median(log_vmap[log_vmap>=val])
    #!!!
    mu=val
    print(mu)  # debug output
    # clip everything above the lim-quantile to that quantile value
    log_vmap[log_vmap>=val]=mu # changed to >= for v3
    # divide by val; presumably normalises to [0, 1] when vmap was
    # pre-normalised to max 1 by the caller -- TODO confirm
    log_vmap=log_vmap/val
    # fun=cs[:ind]
    # fun=fun/fun[-1]
    # lvm=(ind-1)*log_vmap
    # lvm=lvm.astype(np.int64)
    # lvm=fun[lvm]
    lvm=log_vmap
    print(np.var(lvm))  # debug output
    # map the normalised log-variance linearly to [stdmin, stdmax]
    stdmap=stdmin+(stdmax-stdmin)*lvm
    stdmap=np.reshape(stdmap,vmap.shape)
    return stdmap
def find_gstds_simplified(density, tau, max_radius, k, gstd_min=0.2, gstd_max_scaling=0.85):
    """
    Compute (sigma_min, sigma_max) for a given sampling density.

    sigma_max scales with 1/sqrt(density) and is clamped to an upper bound of
    1200.  `tau`, `max_radius` and `k` are accepted for interface
    compatibility but are not used.
    """
    upper_bound = 1200
    candidate = gstd_max_scaling / np.sqrt(density) - gstd_min
    gstd_max = candidate if candidate <= upper_bound else upper_bound
    return gstd_min, gstd_max
@numba.jit(nopython=True)
def get_decision_map(mask, stdmap, tau):
    """
    calculates a decision map (discrete probability density function) from a given
    mask, local standard deviation (stdmap) and tau

    Starting from a map of ones, the neighbourhood of every already-sampled
    position is multiplied with a modified Gaussian kernel (see get_gauss),
    suppressing the probability of drawing new samples close to existing ones.
    NOTE(review): pdf inherits mask's dtype via ones_like; this presumably
    relies on mask being a float array -- with an integer mask the products
    would be truncated.  TODO confirm against callers.
    """
    sy, sx = mask.shape
    pdf = np.ones_like(mask)
    centerY, centerX = np.where(mask == 1)
    for i, cy in enumerate(centerY):
        # print(i)
        cx = centerX[i]
        # kernel width is adapted to the local standard deviation
        std = stdmap[cy, cx]
        gauss = get_gauss(std, tau)
        border = int((gauss.shape[0]-1) / 2)
        # If the slice has the full kernel shape, the kernel fits inside the
        # image.  (A negative slice start wraps around in numpy, producing a
        # shorter slice, so edge cases fail this test and take the else branch.)
        xx = pdf[cy-border:cy+border+1, cx-border:cx+border+1]
        if xx.shape == gauss.shape:
            pdf[cy-border:cy+border+1, cx-border:cx+border+1] *= gauss
        else:
            # kernel sticks out of the image: compute the actual extent on
            # each side and crop the kernel accordingly
            bx1 = border  # extent to the left
            if cx < border:
                bx1 = cx
            by1 = border  # extent to the top
            if cy < border:
                by1 = cy
            bx2 = border  # extent to the right
            tmp = pdf.shape[1]-cx-1
            if tmp < border:
                bx2 = tmp
            by2 = border  # extent to the bottom
            tmp = pdf.shape[0]-cy-1
            if tmp < border:
                by2 = tmp
            gauss_c = gauss[border-by1:border+by2+1, border-bx1:border+bx2+1]
            # print(gauss_c.shape)
            # clamp the destination slice start to the image
            val1 = cy-border
            if val1 < 0:
                val1 = 0
            val2 = cx-border
            if val2 < 0:
                val2 = 0
            pdf[val1:cy+border+1,
                val2:cx+border+1] *= gauss_c
    return pdf
def adaptive_sampling_mask_simplified5(img, sample_params, mask_initial):
    """
    calculate an adaptive mask from a given image (img) and the corresponding
    parameters (sample_params)

    Parameters
    ----------
    img : 2D array, the image to be sampled
    sample_params : params instance; reads max_add, des_dens, var_border, tau
        and the dynamically attached sig_min_curr / sig_max_factor_curr
    mask_initial : 2D binary array used as the starting mask (copied, the
        caller's array is not modified)

    Returns
    -------
    2D mask containing round(des_dens * img.size) sampled positions.
    """
    # maximal number of samples added per iteration (N_+ as a pixel count)
    max_add = int(img.shape[0] * img.shape[1] * sample_params.max_add)
    # initialize mask as random mask with low density
    mask = 1 * mask_initial  # multiplication makes a copy
    # initialise counter for samples; has to reach 0
    sample_counter = int(np.round(sample_params.des_dens * img.shape[0] * img.shape[1]))
    num_samples = np.count_nonzero(mask)
    sample_counter -= num_samples
    strlen = len(str(sample_counter))
    t1 = time.time()
    while sample_counter > 0:
        sc = str(sample_counter).zfill(strlen)
        print('\r samples to set: '+sc, flush=False, end='\n')
        # sample image with current mask
        img_masked = img * mask
        # calculate variance map and normalise
        vmap = get_var_map_weighted(mask, img_masked, sample_params.var_border)
        vmap = vmap / np.max(vmap)
        # sigma_min / sigma_max for the current density (2nd-4th args unused)
        gstd_min, gstd_max = find_gstds_simplified(num_samples / vmap.size, None, None, None,
                                                   gstd_min=sample_params.sig_min_curr,
                                                   gstd_max_scaling=sample_params.sig_max_factor_curr)
        gstd_max = gstd_max+gstd_min
        # calculate std map
        stdmap = get_stdmap_for_v5(vmap, .98, gstd_min, gstd_max)
        # calculate decision map
        dmap = get_decision_map(mask, stdmap, sample_params.tau)
        # Determine how much samples to draw
        if sample_counter < max_add:
            add_samples = sample_counter
        else:
            add_samples = max_add
        # draw random samples from dmap (=pdf); the kernel's centre value is 0
        # (see get_gauss), so already-sampled positions presumably have zero
        # probability and are never redrawn -- TODO confirm
        sample_list = draw_from_pdf(dmap, add_samples)
        # update sample counter
        sample_counter -= add_samples
        num_samples += add_samples
        # add new samples to mask
        mask[sample_list[:, 0], sample_list[:, 1]] = 1
    t2 = time.time()
    print('needed time %0.2f for %0.2f' % (t2-t1, sample_params.des_dens))
    return mask
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
% Copyright (c) 2020 by %
% Chair of Multimedia Communications and Signal Processing %
% Friedrich-Alexander-Universität Erlangen-Nürnberg (FAU) %
% - all rights reserved - %
% %
% YOU ARE USING THIS PROGRAM AT YOUR OWN RISK! THE AUTHOR %
% IS NOT RESPONSIBLE FOR ANY DAMAGE OR DATA-LOSS CAUSED BY THE %
% USE OF THIS PROGRAM. %
% %
% %
% If you have any questions please contact: %
% %
% Simon Grosche, M.Sc. %
% Multimedia Communications and Signal Processing %
% University of Erlangen-Nuremberg %
% Cauerstr. 7 %
% 91058 Erlangen, Germany %
% %
% email: simon.grosche @ fau.de %
This helper file is called from generate_patterns.py
It implements several helper-functions for incremental sampling as well as an optimized incremental sampling.
"""
import numpy as np
import numba
import math
from scipy import misc
def create_maskfromcopymask(copymask, sy=2400, sx=2400):
    """
    Create a mask by repeatedly using a smaller mask (copymask).

    Parameters
    ----------
    copymask : 2D array, the tile that is repeated periodically
    sy, sx : int, size of the resulting mask; must be even

    Returns
    -------
    2D float array of shape (sy, sx)

    Raises
    ------
    ValueError if sy or sx is not divisible by 2.
    """
    if sy % 2 != 0 or sx % 2 != 0:
        raise ValueError('Size needs to be divisible by 2')
    cy, cx = np.shape(copymask)
    # One extra repetition in each direction, then crop to (sy, sx).
    # BUGFIX: the original used np.float, which was removed in NumPy 1.20;
    # the builtin float (= float64) is the drop-in replacement.
    ny, nx = sy // cy+1, sx // cx+1
    mask = np.tile(np.asarray(copymask, dtype=float), (ny, nx))
    return mask[0:sy, 0:sx]
def validate_random_mask(mask, density):
    """
    Check that `mask` contains exactly round(density * mask.size) sampled
    (== 1) positions; raise ValueError otherwise.
    """
    n_actual = np.sum(mask == 1)
    n_expected = int(np.round(density * np.prod(np.size(mask))))
    if n_actual == n_expected:
        return
    raise ValueError(
        'Number of sampled data points (%d) of this mask does not match the expected number (%d) according to the given density (%0.2f).' % (
            n_actual, n_expected, density))
def create_random_mask(s, density):
    """
    Create a 2D random sampling mask of shape `s` with the given density.

    Positions are drawn uniformly at random; a draw hitting an already-set
    position is rejected, so exactly round(density * s[0] * s[1]) distinct
    pixels end up set to 1.
    """
    assert (len(s) == 2)
    new_mask = np.zeros(s)
    # number of samples that have to be placed
    n_samples = int(np.round(density * np.prod(s)))
    placed = 0
    while placed < n_samples:
        row = np.random.randint(low=0, high=s[0])
        col = np.random.randint(low=0, high=s[1])
        # accept only positions that are still empty
        if new_mask[row, col] == 0:
            new_mask[row, col] = 1
            placed += 1
    # double-check the resulting density before returning
    validate_random_mask(new_mask, density)
    return new_mask
def create_and_save_largercopymask(mask, fname, factor):
    """
    Periodically repeat `mask` `factor` times in each direction and save the
    result as '<fname>copiedperiodically.png'.

    Returns the enlarged mask.

    NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2; the README
    states this code was last tested with scipy 1.0.0.
    """
    sy, sx = np.shape(mask)
    # enlarge by periodic repetition (factor == 1 keeps the original size)
    maskcopy = create_maskfromcopymask(mask, sy=factor * sy,
                                       sx=factor * sx)  # original note: verified that the repeated mask keeps the property values of the tile
    misc.imsave(fname+'copiedperiodically'+'.png', maskcopy)
    return maskcopy
@numba.jit(nopython=True)
def gaussian_density(dy, dx, y0, x0, sigm, tau):
    """
    Periodic modified-Gaussian suppression map centred at (y0, x0).

    Returns a (dy, dx) array with value (1 - exp(-(d_y**2 + d_x**2)/sigm))**tau,
    where d_y, d_x are the periodic (wrap-around) distances to (y0, x0);
    entries with a distance component >= 8 are left at 1.  The centre itself
    is set to 0 and the map is normalised to maximum 1.  Note: `sigm` divides
    the squared distance directly, i.e. it plays the role of sigma**2.
    """
    pdf = np.ones((dy, dx), dtype=np.float64)
    # precompute the kernel values for all offsets 0..7 in each direction
    pdf_precalc = np.ones((8, 8), dtype=np.float64)
    for y in range(8):
        for x in range(8):
            pdf_precalc[y, x] = (1-np.exp(-(y ** 2+x ** 2) / sigm)) ** tau
    for i in range(dy):
        # periodic vertical distance of row i to y0
        y = dy // 2-abs(dy // 2-np.abs(i-y0))
        if y < 8:
            for j in range(dx):
                # periodic horizontal distance of column j to x0
                x = dx // 2-abs(dx // 2-np.abs(j-x0))
                # pdf[i,j] = (1-np.exp(-(y**2+x**2)/5))**5 # seems to be a somewhat nice function. Is has low regularity according to ftvar..... parameters, but somewhat high discrepancy.
                if x < 8:
                    pdf[i, j] = pdf_precalc[y, x]
    # the centre position itself must never be drawn again
    pdf[y0, x0] = 0
    return pdf / np.max(pdf)
@numba.jit(nopython=True)
def gaussian_density_nonperiodic(dy, dx, y0, x0, sigm, tau, pdf_precalc):
pdf = np.ones((dy, dx), dtype=np.float64)
for i in np.arange(y0-7, y0+8):
y = abs(i-y0)
if i < dy and i >= 0:
for j in np.arange(x0-7, x0