Merge pull request #58 from qutech/hotfix/cache_intermediates_gradient
Hotfix gradient with cache intermediates
thangleiter authored Mar 5, 2021
2 parents cde8258 + de02256 commit dc1ccd2
Showing 4 changed files with 99 additions and 32 deletions.
6 changes: 3 additions & 3 deletions filter_functions/__init__.py
@@ -20,7 +20,7 @@
# =============================================================================
"""Package for efficient calculation of generalized filter functions"""

from . import analytic, basis, numeric, pulse_sequence, superoperator, util
from . import analytic, basis, gradient, numeric, pulse_sequence, superoperator, util
from .basis import Basis
from .gradient import infidelity_derivative
from .numeric import error_transfer_matrix, infidelity
@@ -29,9 +29,9 @@

__all__ = ['Basis', 'PulseSequence', 'analytic', 'basis', 'concatenate', 'concatenate_periodic',
'error_transfer_matrix', 'extend', 'infidelity', 'liouville_representation', 'numeric',
'pulse_sequence', 'remap', 'util', 'superoperator', 'infidelity_derivative']
'gradient', 'pulse_sequence', 'remap', 'util', 'superoperator', 'infidelity_derivative']


__version__ = '1.0.1'
__version__ = '1.0.2'
__license__ = 'GNU GPLv3+'
__author__ = 'Quantum Technology Group, RWTH Aachen University'
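With gradient added to the package imports and to __all__, the submodule is now reachable directly from the top-level namespace. A minimal sketch of what this change makes possible (nothing here beyond what the import block above establishes):

import filter_functions as ff

# The gradient submodule is now part of the public namespace, and the
# convenience import points at the same function object.
assert ff.gradient.infidelity_derivative is ff.infidelity_derivative
print(ff.__version__)  # '1.0.2' after this commit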
5 changes: 3 additions & 2 deletions filter_functions/gradient.py
@@ -487,7 +487,8 @@ def calculate_derivative_of_control_matrix_from_scratch(
basis_transformed = numeric._transform_by_unitary(eigvecs[:, None], basis[None],
out=np.empty((n_dt, d**2, d, d), complex))
c_opers_transformed = numeric._transform_hamiltonian(eigvecs, c_opers[idx]).swapaxes(0, 1)
if intermediates is None:
if not intermediates:
# None or empty
n_opers_transformed = numeric._transform_hamiltonian(eigvecs, n_opers,
n_coeffs).swapaxes(0, 1)
exp_buf, integral = np.empty((2, n_omega, d, d), dtype=complex)
@@ -507,7 +508,7 @@
n_opers.shape, (len(omega), d, d),
optimize=[(0, 3), (0, 1), (0, 1)])
for g in range(n_dt):
if intermediates is None:
if not intermediates:
integral = numeric._first_order_integral(omega, eigvals[g], dt[g], exp_buf, integral)
else:
integral = intermediates['first_order_integral'][g]
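The change above is the core of the hotfix: when caching is requested but nothing has been stored yet, intermediates arrives as an empty dict rather than None, so the guard now tests truthiness instead of identity. A minimal standalone sketch of the pattern (the helper name and the recompute callable are illustrative, not library API):

def first_order_integral_for_segment(g, intermediates, recompute):
    """Return the cached first-order integral for segment g, or recompute it.

    intermediates may be None, an empty dict, or a dict holding the cached
    'first_order_integral' array; only the last case is reused.
    """
    if not intermediates:  # covers both None and {}, unlike the old `is None`
        return recompute(g)
    return intermediates['first_order_integral'][g]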
88 changes: 61 additions & 27 deletions filter_functions/numeric.py
@@ -475,25 +475,26 @@ def calculate_noise_operators_from_scratch(
t: Optional[Coefficients] = None,
show_progressbar: bool = False,
cache_intermediates: bool = False
) -> ndarray:
) -> Union[ndarray, Tuple[ndarray, Dict[str, ndarray]]]:
r"""
Calculate the noise operators in interaction picture from scratch.
Parameters
----------
eigvals: array_like, shape (n_dt, d)
Eigenvalue vectors for each time pulse segment *g* with the first
axis counting the pulse segment, i.e.
Eigenvalue vectors for each time pulse segment *g* with the
first axis counting the pulse segment, i.e.
``eigvals == array([D_0, D_1, ...])``.
eigvecs: array_like, shape (n_dt, d, d)
Eigenvector matrices for each time pulse segment *g* with the first
axis counting the pulse segment, i.e.
Eigenvector matrices for each time pulse segment *g* with the
first axis counting the pulse segment, i.e.
``eigvecs == array([V_0, V_1, ...])``.
propagators: array_like, shape (n_dt+1, d, d)
The propagators :math:`Q_g = P_g P_{g-1}\cdots P_0` as a (d, d) array
with *d* the dimension of the Hilbert space.
The propagators :math:`Q_g = P_g P_{g-1}\cdots P_0` as a (d, d)
array with *d* the dimension of the Hilbert space.
omega: array_like, shape (n_omega,)
Frequencies at which the pulse control matrix is to be evaluated.
Frequencies at which the pulse control matrix is to be
evaluated.
n_opers: array_like, shape (n_nops, d, d)
Noise operators :math:`B_\alpha`.
n_coeffs: array_like, shape (n_nops, n_dt)
@@ -507,12 +508,19 @@ def calculate_noise_operators_from_scratch(
computed from *dt*.
show_progressbar: bool, optional
Show a progress bar for the calculation.
cache_intermediates: bool, optional
Keep and return intermediate terms of the calculation that can
be reused in other computations (second order or gradients).
Otherwise the sum is performed in-place. The default is False.
Returns
-------
noise_operators: ndarray, shape (n_omega, n_nops, d, d)
The interaction picture noise operators
:math:`\tilde{B}_\alpha(\omega)`.
intermediates: dict[str, ndarray]
Intermediate results of the calculation. Only if
cache_intermediates is True.
Notes
-----
@@ -700,7 +708,7 @@ def calculate_control_matrix_from_scratch(
show_progressbar: bool = False,
cache_intermediates: bool = False,
out: Optional[ndarray] = None
) -> Union[ndarray, Dict[str, ndarray]]:
) -> Union[ndarray, Tuple[ndarray, Dict[str, ndarray]]]:
r"""
Calculate the control matrix from scratch, i.e. without knowledge of
the control matrices of more atomic pulse sequences.
Expand Down Expand Up @@ -738,10 +746,9 @@ def calculate_control_matrix_from_scratch(
show_progressbar: bool, optional
Show a progress bar for the calculation.
cache_intermediates: bool, optional
Keep and return intermediate terms
:math:`\mathcal{G}^{(g)}(\omega)` of the sum so that
:math:`\mathcal{B}(\omega)=\sum_g\mathcal{G}^{(g)}(\omega)`.
Otherwise the sum is performed in-place.
Keep and return intermediate terms of the calculation that can
be reused in other computations (second order or gradients).
Otherwise the sum is performed in-place. The default is False.
out: ndarray, optional
A location into which the result is stored. See
:func:`numpy.ufunc`.
@@ -934,7 +941,8 @@ def calculate_cumulant_function(
decay_amplitudes: Optional[ndarray] = None,
frequency_shifts: Optional[ndarray] = None,
show_progressbar: bool = False,
memory_parsimonious: bool = False
memory_parsimonious: bool = False,
cache_intermediates: bool = False
) -> ndarray:
r"""Calculate the cumulant function :math:`\mathcal{K}(\tau)`.
Expand Down Expand Up @@ -984,6 +992,11 @@ def calculate_cumulant_function(
Trade memory footprint for performance. See
:func:`~numeric.calculate_decay_amplitudes`. The default is
``False``.
cache_intermediates: bool, optional
Keep and return intermediate terms of the calculation of the
control matrix that can be reused in other computations (second
order or gradients). Otherwise the sum is performed in-place.
The default is False.
Returns
-------
@@ -1063,9 +1076,8 @@

if decay_amplitudes is None:
decay_amplitudes = calculate_decay_amplitudes(pulse, spectrum, omega, n_oper_identifiers,
which, show_progressbar,
cache_intermediates=second_order,
memory_parsimonious=memory_parsimonious)
which, show_progressbar, cache_intermediates,
memory_parsimonious)

if second_order:
if frequency_shifts is None:
@@ -1694,7 +1706,8 @@ def error_transfer_matrix(
second_order: bool = False,
cumulant_function: Optional[ndarray] = None,
show_progressbar: bool = False,
memory_parsimonious: bool = False
memory_parsimonious: bool = False,
cache_intermediates: bool = False
) -> ndarray:
r"""Compute the error transfer matrix up to unitary rotations.
@@ -1735,6 +1748,11 @@
Trade memory footprint for performance. See
:func:`~numeric.calculate_decay_amplitudes`. The default is
``False``.
cache_intermediates: bool, optional
Keep and return intermediate terms of the calculation of the
control matrix (if it is not already cached) that can be reused
for second order or gradients. Can consume a large amount of
memory, but speeds up the calculation.
Returns
-------
@@ -1787,7 +1805,8 @@
cumulant_function = calculate_cumulant_function(pulse, spectrum, omega,
n_oper_identifiers, 'total', second_order,
show_progressbar=show_progressbar,
memory_parsimonious=memory_parsimonious)
memory_parsimonious=memory_parsimonious,
cache_intermediates=cache_intermediates)

try:
# agnostic of the specific shape of cumulant_function, just sum over everything except for
@@ -1804,11 +1823,17 @@


@util.parse_optional_parameters({'which': ('total', 'correlations')})
def infidelity(pulse: 'PulseSequence', spectrum: Union[Coefficients, Callable],
omega: Union[Coefficients, Dict[str, Union[int, str]]],
n_oper_identifiers: Optional[Sequence[str]] = None,
which: str = 'total', return_smallness: bool = False,
test_convergence: bool = False) -> Union[ndarray, Any]:
def infidelity(
pulse: 'PulseSequence',
spectrum: Union[Coefficients, Callable],
omega: Union[Coefficients, Dict[str, Union[int, str]]],
n_oper_identifiers: Optional[Sequence[str]] = None,
which: str = 'total',
show_progressbar: bool = False,
cache_intermediates: bool = False,
return_smallness: bool = False,
test_convergence: bool = False
) -> Union[ndarray, Any]:
r"""Calculate the leading order entanglement infidelity.
This function calculates the infidelity approximately from the
@@ -1861,6 +1886,12 @@ def infidelity(pulse: 'PulseSequence', spectrum: Union[Coefficients, Callable],
functions have been computed during concatenation (see
:func:`calculate_pulse_correlation_filter_function` and
:func:`~filter_functions.pulse_sequence.concatenate`).
show_progressbar: bool, optional
Show a progressbar for the calculation of the control matrix.
cache_intermediates: bool, optional
Keep and return intermediate terms of the calculation of the
control matrix (if it is not already cached) that can be reused
for second order or gradients. The default is False.
return_smallness: bool, optional
Return the smallness parameter :math:`\xi` for the given
spectrum.
@@ -1998,7 +2029,8 @@ def infidelity(pulse: 'PulseSequence', spectrum: Union[Coefficients, Callable],
freqs = xspace(omega_IR, omega_UV, n//2)
convergence_infids[i] = infidelity(pulse, spectrum(freqs), freqs,
n_oper_identifiers=n_oper_identifiers,
which='total', return_smallness=False,
which='total', show_progressbar=show_progressbar,
cache_intermediates=False, return_smallness=False,
test_convergence=False)

return n_samples, convergence_infids
@@ -2012,11 +2044,13 @@ def infidelity(pulse: 'PulseSequence', spectrum: Union[Coefficients, Callable],
traces_diag = (sparse.diagonal(traces, axis1=2, axis2=3).sum(-1) -
sparse.diagonal(traces, axis1=1, axis2=3).sum(-1)).todense()

control_matrix = pulse.get_control_matrix(omega)
control_matrix = pulse.get_control_matrix(omega, show_progressbar, cache_intermediates)
filter_function = np.einsum('ako,blo,kl->abo',
control_matrix.conj(), control_matrix, traces_diag)/pulse.d
else:
filter_function = pulse.get_filter_function(omega)
filter_function = pulse.get_filter_function(omega, which='fidelity',
show_progressbar=show_progressbar,
cache_intermediates=cache_intermediates)
else:
# which == 'correlations'
if not pulse.basis.istraceless:
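Taken together, the numeric.py changes thread the new cache_intermediates flag through infidelity, calculate_cumulant_function and error_transfer_matrix so that control-matrix intermediates can be reused, for instance by the gradient module. A hedged usage sketch mirroring the new test below; the two-level pulse construction is illustrative and only assumes the public PulseSequence API, it is not taken from this commit:

import numpy as np
import filter_functions as ff

# Illustrative two-level pulse: X control with random amplitudes, Z noise.
rng = np.random.default_rng(0)
n_dt = 10
pulse = ff.PulseSequence(
    [[ff.util.paulis[1]/2, rng.standard_normal(n_dt), 'X']],  # control Hamiltonian
    [[ff.util.paulis[3], np.ones(n_dt), 'Z']],                 # noise Hamiltonian
    np.full(n_dt, 0.1)                                         # time steps
)
omega = ff.util.get_sample_frequencies(pulse, n_samples=27)
spectrum = 1/omega

# Cache the control matrix together with its intermediates ...
pulse.cache_control_matrix(omega, cache_intermediates=True)
# ... which infidelity() can now reuse instead of recomputing them,
infid = ff.infidelity(pulse, spectrum, omega, cache_intermediates=True)
# ... and which the gradient calculation accepts directly, as in the new test.
deriv = ff.gradient.calculate_derivative_of_control_matrix_from_scratch(
    omega, pulse.propagators, pulse.eigvals, pulse.eigvecs, pulse.basis, pulse.t,
    pulse.dt, pulse.n_opers, pulse.n_coeffs, pulse.c_opers, pulse.c_oper_identifiers,
    intermediates=pulse._intermediates
)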
32 changes: 32 additions & 0 deletions tests/test_gradient.py
@@ -72,6 +72,38 @@ def test_gradient_calculation_random_pulse(self):
)
self.assertArrayAlmostEqual(ana_grad, fin_diff_grad, rtol=1e-6, atol=1e-8)

def test_caching(self):
"""Make sure calculation works with or without cached intermediates."""

for d, n_dt in zip(testutil.rng.integers(2, 5, 5), testutil.rng.integers(2, 8, 5)):
pulse = testutil.rand_pulse_sequence(d, n_dt)
omega = ff.util.get_sample_frequencies(pulse, n_samples=27)
spect = 1/omega

# Cache control matrix but not intermediates
pulse.cache_control_matrix(omega, cache_intermediates=False)
infid_nocache = ff.infidelity(pulse, spect, omega, cache_intermediates=False)
infid_cache = ff.infidelity(pulse, spect, omega, cache_intermediates=True)

self.assertArrayAlmostEqual(infid_nocache, infid_cache)

cm_nocache = ff.gradient.calculate_derivative_of_control_matrix_from_scratch(
omega, pulse.propagators, pulse.eigvals, pulse.eigvecs, pulse.basis, pulse.t,
pulse.dt, pulse.n_opers, pulse.n_coeffs, pulse.c_opers, pulse.c_oper_identifiers,
intermediates=dict()
)

pulse.cleanup('frequency dependent')
pulse.cache_control_matrix(omega, cache_intermediates=True)
cm_cache = ff.gradient.calculate_derivative_of_control_matrix_from_scratch(
omega, pulse.propagators, pulse.eigvals, pulse.eigvecs, pulse.basis, pulse.t,
pulse.dt, pulse.n_opers, pulse.n_coeffs, pulse.c_opers, pulse.c_oper_identifiers,
intermediates=pulse._intermediates
)

self.assertArrayAlmostEqual(cm_nocache, cm_cache)


def test_raises(self):
pulse = testutil.rand_pulse_sequence(2, 3)
omega = ff.util.get_sample_frequencies(pulse, n_samples=13)
