Hello, when I reproduced your code for the single-photon imaging part, I didn't quite understand the update step for x.
def inverse_step(u, v, K1, K, rho):
    """Per-pixel inversion (x-update) step, presumably of an ADMM scheme.

    Solves, element-wise, for the x at which

        K1 / (exp(x) - 1) - rho*x - K0 + rho*xtilde = 0,   K0 = K**2 - K1

    with xtilde = v - u. Pixels with K1 == 0 get the closed-form solution
    xtilde - K0 / rho; for the remaining pixels the left-hand side is
    strictly decreasing in x, so the root is found by a vectorized
    bisection on [1e-5, 100].

    Parameters
    ----------
    u, v : ndarray
        Same-shape arrays; only their difference xtilde = v - u is used.
        NOTE(review): presumably the ADMM dual and auxiliary variables —
        confirm against the caller.
    K1 : ndarray
        Per-pixel counts (assumed non-negative; zero entries take the
        closed-form branch).
    K : ndarray
        Per-pixel value whose square enters K0 = K**2 - K1.
    rho : float
        Positive penalty weight.

    Returns
    -------
    ndarray
        The updated x, clipped to [0, 1].
    """
    xtilde = v - u
    x = np.copy(xtilde)
    K0 = np.square(K) - K1

    # Closed-form branch for pixels with K1 == 0.
    indices_0 = (K1 == 0)
    x[indices_0] = xtilde[indices_0] - K0[indices_0] / rho

    # E731 fix: named inner function instead of a lambda bound to a name.
    def residual(y):
        # Strictly decreasing in y, so a sign change brackets the root.
        return K1 / (np.exp(y) - 1) - rho * y - K0 + rho * xtilde

    # Vectorized bisection over the still-active pixels (K1 != 0).
    indices_1 = np.logical_not(indices_0)
    bmin = 1e-5 * np.ones_like(x, dtype=np.float64)
    bmax = 100 * np.ones_like(x, dtype=np.float64)
    bave = (bmin + bmax) / 2.0
    # 30 halvings shrink the initial bracket (~100 wide) to ~1e-7.
    for _ in range(30):
        tmp = residual(bave)
        indices_pos = np.logical_and(tmp > 0, indices_1)
        indices_neg = np.logical_and(tmp < 0, indices_1)
        # Pixels that hit the root exactly are frozen at their bave.
        indices_zero = np.logical_and(tmp == 0, indices_1)
        indices_0 = np.logical_or(indices_0, indices_zero)
        indices_1 = np.logical_not(indices_0)
        # residual > 0 means the root lies above bave (function decreases).
        bmin[indices_pos] = bave[indices_pos]
        bmax[indices_neg] = bave[indices_neg]
        bave[indices_1] = (bmin[indices_1] + bmax[indices_1]) / 2.0
    x[K1 != 0] = bave[K1 != 0]
    return np.clip(x, 0.0, 1.0)
Does it use dichotomy (bisection) to find the minimizer? What is its advantage over setting the derivative equal to zero analytically? Looking forward to your reply.
Hello, when I reproduced your code for the single-photon imaging part, I didn't quite understand the update step for x.
def inverse_step(u, v, K1, K, rho):
xtilde = v - u
x = np.copy(xtilde)
K0 = np.square(K) - K1
Does it use dichotomy (bisection) to find the minimizer? What is its advantage over setting the derivative equal to zero analytically? Looking forward to your reply.