hide warning to improve logs quality
perrotcap committed Oct 2, 2024
1 parent 41a2fb3 commit 2d8b13a
Showing 1 changed file with 40 additions and 37 deletions.
@@ -114,43 +114,46 @@ def get_dsmooth_dvariable(cst, alpha=3):
 
 
 def get_dsmooth_dvariable_vect(cst, alpha=3):
-    cst_array = np.array(cst)
-    max_exp = 650.0  # max value for exponent input, higher value gives infinity
-    min_exp = -300
-    alphaxcst = alpha * cst_array
-
-    max_alphax = np.amax(alphaxcst, axis=1)
-
-    k = max_alphax - max_exp
-    exp_func = np.maximum(min_exp, alpha * cst_array -
-                          np.repeat(k, cst_array.shape[1]).reshape(cst_array.shape))
-    den = np.sum(np.exp(exp_func), axis=1)
-    num = np.sum(cst_array * np.exp(exp_func), axis=1)
-
-    # Vectorized calculation
-    exp_func = np.maximum(min_exp, alpha * cst_array -
-                          np.repeat(k, cst_array.shape[1]).reshape(cst_array.shape))
-    dden = alpha * np.exp(exp_func)
-    dnum = cst_array * (alpha * np.exp(exp_func)
-                        ) + np.exp(exp_func)
-    grad_value = dnum / np.repeat(den, cst_array.shape[1]).reshape(cst_array.shape) - (np.repeat(num, cst_array.shape[1]).reshape(
-        cst_array.shape) / np.repeat(den, cst_array.shape[1]).reshape(cst_array.shape)) * (dden / np.repeat(den, cst_array.shape[1]).reshape(cst_array.shape))
-
-    # Special case for max element
-    max_elem = np.amax(cst_array * np.sign(alpha), axis=1) * np.sign(alpha)
-    non_max_idx = np.array([cst_array[i] != max_elem[i]
-                            for i in np.arange(cst_array.shape[0])]).reshape(cst_array.shape[0], cst_array.shape[1])
-    dden_max = np.sum(-alpha * non_max_idx *
-                      np.exp(np.maximum(min_exp, alpha * cst_array - np.repeat(k, cst_array.shape[1]).reshape(cst_array.shape))), axis=1)
-    dnum_max = np.sum(-alpha * cst_array * non_max_idx *
-                      np.exp(np.maximum(min_exp, alpha * cst_array - np.repeat(k, cst_array.shape[1]).reshape(cst_array.shape))), axis=1)
-    # derivative of den wto cstmax is 0
-    dden_max = dden_max + 0.0
-    dnum_max = dnum_max + 1.0 * np.exp(alpha * max_elem - k)
-    grad_val_max = dnum_max / den - (num / den) * (dden_max / den)
-
-    for i in np.arange(cst_array.shape[0]):
-        grad_value[i][np.logical_not(non_max_idx)[i]] = grad_val_max[i]
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+
+        cst_array = np.array(cst)
+        max_exp = 650.0  # max value for exponent input, higher value gives infinity
+        min_exp = -300
+        alphaxcst = alpha * cst_array
+
+        max_alphax = np.amax(alphaxcst, axis=1)
+
+        k = max_alphax - max_exp
+        exp_func = np.maximum(min_exp, alpha * cst_array -
+                              np.repeat(k, cst_array.shape[1]).reshape(cst_array.shape))
+        den = np.sum(np.exp(exp_func), axis=1)
+        num = np.sum(cst_array * np.exp(exp_func), axis=1)
+
+        # Vectorized calculation
+        exp_func = np.maximum(min_exp, alpha * cst_array -
+                              np.repeat(k, cst_array.shape[1]).reshape(cst_array.shape))
+        dden = alpha * np.exp(exp_func)
+        dnum = cst_array * (alpha * np.exp(exp_func)
+                            ) + np.exp(exp_func)
+        grad_value = dnum / np.repeat(den, cst_array.shape[1]).reshape(cst_array.shape) - (np.repeat(num, cst_array.shape[1]).reshape(
+            cst_array.shape) / np.repeat(den, cst_array.shape[1]).reshape(cst_array.shape)) * (dden / np.repeat(den, cst_array.shape[1]).reshape(cst_array.shape))
+
+        # Special case for max element
+        max_elem = np.amax(cst_array * np.sign(alpha), axis=1) * np.sign(alpha)
+        non_max_idx = np.array([cst_array[i] != max_elem[i]
+                                for i in np.arange(cst_array.shape[0])]).reshape(cst_array.shape[0], cst_array.shape[1])
+        dden_max = np.sum(-alpha * non_max_idx *
+                          np.exp(np.maximum(min_exp, alpha * cst_array - np.repeat(k, cst_array.shape[1]).reshape(cst_array.shape))), axis=1)
+        dnum_max = np.sum(-alpha * cst_array * non_max_idx *
+                          np.exp(np.maximum(min_exp, alpha * cst_array - np.repeat(k, cst_array.shape[1]).reshape(cst_array.shape))), axis=1)
+        # derivative of den wto cstmax is 0
+        dden_max = dden_max + 0.0
+        dnum_max = dnum_max + 1.0 * np.exp(alpha * max_elem - k)
+        grad_val_max = dnum_max / den - (num / den) * (dden_max / den)
+
+        for i in np.arange(cst_array.shape[0]):
+            grad_value[i][np.logical_not(non_max_idx)[i]] = grad_val_max[i]
 
     return grad_value
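For context, this reading is inferred from the diff rather than stated in the commit: the function appears to compute the gradient of a smooth maximum built from exponential weights, where the shift k = max_i(alpha * x_i) - 650 keeps the arguments of np.exp inside float64 range and cancels out of the ratio. In LaTeX, with f_j = \alpha x_j - k:

s(x) = \frac{\sum_i x_i e^{f_i}}{\sum_i e^{f_i}},
\qquad
\frac{\partial s}{\partial x_j}
  = \frac{(1 + \alpha x_j)\, e^{f_j}}{\mathrm{den}}
  - \frac{\mathrm{num}}{\mathrm{den}} \cdot \frac{\alpha\, e^{f_j}}{\mathrm{den}}

which matches dnum / den - (num / den) * (dden / den) in the code; the "special case for max element" block then corrects the column holding the row maximum, since k itself moves with that entry.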
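The change itself is purely about logging: the body is wrapped in warnings.catch_warnings() with simplefilter("ignore"), so the RuntimeWarnings that numpy can route through the warnings machinery from np.exp and the divisions here (overflow, underflow, invalid value) no longer show up in the logs on every call. A minimal sketch of the same suppression pattern; noisy_exp_sum is a hypothetical helper, not code from this repository:

import warnings

import numpy as np


def noisy_exp_sum(x, alpha=3):
    """Hypothetical helper: np.exp overflows for alpha * x > ~709 in float64."""
    with warnings.catch_warnings():
        # Scoped suppression: numpy's usual
        # "RuntimeWarning: overflow encountered in exp" is not emitted here.
        warnings.simplefilter("ignore")
        return np.sum(np.exp(alpha * np.asarray(x, dtype=float)))


print(noisy_exp_sum([100.0, 250.0, 300.0]))  # inf, with no warning in the logs

Because catch_warnings() restores the previous filter state on exit, the suppression does not leak into callers; np.errstate(all="ignore") would be a more targeted alternative if only numpy floating-point warnings were in scope.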
