Small improvements #66

Merged
merged 29 commits into master from feature/small_improvements on Jul 8, 2021

Commits (29)
371b719
Use dot_HS in oper_equiv for conciseness
thangleiter Apr 21, 2021
977ea82
Add hermitian arg to basis expansions. Results in real output
thangleiter Apr 11, 2021
3bbb799
Fix typo in docstring
thangleiter Apr 11, 2021
391567c
Only compute transformed opers if return value is None
thangleiter Apr 15, 2021
ea215db
Use hermitian kwarg in expansion
thangleiter Apr 15, 2021
68130df
Improve liouville_representation
thangleiter Apr 15, 2021
338ac9c
Remove unused import
thangleiter May 4, 2021
dc1dc6b
Simpler syntax for util.parse_optional_parameters
thangleiter May 4, 2021
0ad72df
Drop util.parse_which_FF_parameter
thangleiter May 4, 2021
a004102
More sensible n_samples default
thangleiter May 5, 2021
2c55ed1
Factor out qutip check
thangleiter May 5, 2021
836e55c
Fix number of points for convergence test in infidelity
thangleiter May 6, 2021
dfea9a7
Improve error handling in concatenate_without_filter_function
thangleiter May 6, 2021
455ed33
Improve __str__ method
thangleiter May 6, 2021
e543343
Add __deepcopy__ method
thangleiter May 7, 2021
5b4b1cf
Also cache fidelity FF if caching generalized FF
thangleiter May 10, 2021
accd7e0
Allow ndarray for basis_labels
thangleiter May 11, 2021
7cf65eb
Add optional axes arg to plot_infidelity_convergence
thangleiter May 20, 2021
07a45bb
Better error message
thangleiter May 20, 2021
d2ccc56
Merge branch 'master' into feature/small_improvements
thangleiter May 20, 2021
1cff6a9
Test for optional ax arg
thangleiter May 20, 2021
f4d77d3
Clean up concatenate_periodic()
thangleiter May 25, 2021
c95aaa8
Use linalg.solve() instead of explicitly inverting
thangleiter May 25, 2021
10bbeb9
initial kwarg of accumulate was only introduced in 3.8
thangleiter May 25, 2021
9eb3eb9
Actually use n samples when test_convergence=True
thangleiter May 25, 2021
e8cb45d
Remove unnecessary :func: decorations in See Also's
thangleiter May 25, 2021
7f8f148
Allow for (in principle) n_opers with finite trace
thangleiter Jun 14, 2021
ba4a819
Get rid of redundant computation of propagator products
thangleiter Jun 22, 2021
129b5ba
Merge remote-tracking branch 'origin/master' into feature/small_impro…
thangleiter Jun 25, 2021
36 changes: 24 additions & 12 deletions filter_functions/basis.py
@@ -568,7 +568,7 @@ def _full_from_partial(elems: Sequence, traceless: bool, labels: Sequence[str])
else:
ggm = Basis.ggm(elems.d)

coeffs = expand(elems, ggm, tidyup=True)
coeffs = expand(elems, ggm, hermitian=elems.isherm, tidyup=True)
# Throw out coefficient vectors that are all zero (should only happen for
# the identity)
coeffs = coeffs[(coeffs != 0).any(axis=-1)]
@@ -637,7 +637,7 @@ def normalize(b: Basis) -> Basis:


def expand(M: Union[ndarray, Basis], basis: Union[ndarray, Basis],
normalized: bool = True, tidyup: bool = False) -> ndarray:
normalized: bool = True, hermitian: bool = False, tidyup: bool = False) -> ndarray:
r"""
Expand the array *M* in the basis given by *basis*.

@@ -650,6 +650,9 @@ def expand(M: Union[ndarray, Basis], basis: Union[ndarray, Basis],
The basis of shape (m, d, d) in which to expand.
normalized: bool {True}
Wether the basis is normalized.
hermitian: bool (default: False)
If M is hermitian along its last two axes, the result will be
real.
tidyup: bool {False}
Whether to set values below the floating point eps to zero.

@@ -673,15 +676,18 @@ def expand(M: Union[ndarray, Basis], basis: Union[ndarray, Basis],
{\mathrm{tr}\big(C_j^\dagger C_j\big)}.

"""
coefficients = np.tensordot(M, basis, axes=[(-2, -1), (-1, -2)])

def cast(arr): return arr.real if hermitian and basis.isherm else arr

coefficients = cast(np.tensordot(M, basis, axes=[(-2, -1), (-1, -2)]))
if not normalized:
coefficients /= np.einsum('bij,bji->b', basis, basis).real
coefficients /= cast(np.einsum('bij,bji->b', basis, basis))

return util.remove_float_errors(coefficients) if tidyup else coefficients


def ggm_expand(M: Union[ndarray, Basis], traceless: bool = False) -> ndarray:
def ggm_expand(M: Union[ndarray, Basis], traceless: bool = False,
hermitian: bool = False) -> ndarray:
r"""
Expand the matrix *M* in a Generalized Gell-Mann basis [Bert08]_.
This function makes use of the explicit construction prescription of
@@ -699,11 +705,14 @@ def ggm_expand(M: Union[ndarray, Basis], traceless: bool = False) -> ndarray:
expansion. If it is known beforehand that M is traceless, the
corresponding coefficient is zero and thus doesn't need to be
computed.
hermitian: bool (default: False)
If M is hermitian along its last two axes, the result will be
real.

Returns
-------
coefficients: ndarray
The coefficient array with shape (d**2,) or (..., d**2)
The real coefficient array with shape (d**2,) or (..., d**2)

References
----------
@@ -715,6 +724,8 @@ def ggm_expand(M: Union[ndarray, Basis], traceless: bool = False) -> ndarray:
if M.shape[-1] != M.shape[-2]:
raise ValueError('M should be square in its last two axes')

def cast(arr): return arr.real if hermitian else arr

# Squeeze out an extra dimension to be shape agnostic
square = M.ndim < 3
if square:
@@ -740,18 +751,19 @@ def ggm_expand(M: Union[ndarray, Basis], traceless: bool = False) -> ndarray:
diag_idx_shifted = tuple([...] + [tuple(i for i in range(1, d))]*2)

# Compute the coefficients
coeffs = np.zeros((*M.shape[:-2], d**2), dtype=complex)
coeffs = np.zeros((*M.shape[:-2], d**2), dtype=float if hermitian else complex)
if not traceless:
# First element is proportional to the trace of M
coeffs[..., 0] = np.einsum('...jj', M)/np.sqrt(d)
coeffs[..., 0] = cast(np.einsum('...jj', M))/np.sqrt(d)

# Elements proportional to the symmetric GGMs
coeffs[..., sym_rng] = (M[triu_idx] + M[tril_idx])/np.sqrt(2)
coeffs[..., sym_rng] = cast(M[triu_idx] + M[tril_idx])/np.sqrt(2)
# Elements proportional to the antisymmetric GGMs
coeffs[..., sym_rng + n_sym] = 1j*(M[triu_idx] - M[tril_idx])/np.sqrt(2)
coeffs[..., sym_rng + n_sym] = cast(1j*(M[triu_idx] - M[tril_idx]))/np.sqrt(2)
# Elements proportional to the diagonal GGMs
coeffs[..., diag_rng + 2*n_sym] = M[diag_idx].cumsum(axis=-1) - diag_rng*M[diag_idx_shifted]
coeffs[..., diag_rng + 2*n_sym] /= np.sqrt(diag_rng*(diag_rng + 1))
coeffs[..., diag_rng + 2*n_sym] = cast(M[diag_idx].cumsum(axis=-1)
- diag_rng*M[diag_idx_shifted])
coeffs[..., diag_rng + 2*n_sym] /= cast(np.sqrt(diag_rng*(diag_rng + 1)))

return coeffs.squeeze() if square else coeffs

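The new `hermitian` flag in `expand` and `ggm_expand` returns real coefficient arrays when the expanded operator (and, for `expand`, the basis) is Hermitian, instead of complex arrays with numerically-zero imaginary parts. A minimal sketch of the new keyword, assuming the package's `Basis.pauli` constructor yields the normalized Hermitian Pauli basis:

```python
import numpy as np
from filter_functions.basis import Basis, expand, ggm_expand

# A Hermitian single-qubit operator: sigma_x + 0.5*sigma_z
H = np.array([[0.5, 1.0],
              [1.0, -0.5]])

pauli = Basis.pauli(1)                      # Hermitian, normalized basis
coeffs = expand(H, pauli, hermitian=True)   # coefficients come back real
print(coeffs.dtype)                         # float64

ggm_coeffs = ggm_expand(H, hermitian=True)  # real output array as well
print(ggm_coeffs.dtype)                     # float64
```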
13 changes: 7 additions & 6 deletions filter_functions/gradient.py
@@ -174,11 +174,12 @@ def _liouville_derivative(dt: Coefficients, propagators: ndarray, basis: Basis,

# Calculate the whole propagator derivative
propagators_deriv = np.zeros((c_opers_transformed.shape[1], n-1, n, d, d), dtype=complex)
U_deriv_transformed = np.zeros((c_opers_transformed.shape[1], n-1, d, d), dtype=complex)
for g in range(n-1):
propagators_deriv[:, g, :g+1] = (propagators[g+1]
@ propagators[1:g+2].conj().swapaxes(-1, -2)
@ U_deriv[:, :g+1]
@ propagators[:g+1])
U_deriv_transformed[:, g] = (propagators[g+1].conj().swapaxes(-1, -2)
@ U_deriv[:, g]
@ propagators[g])
propagators_deriv[:, g, :g+1] = propagators[g+1] @ U_deriv_transformed[:, :g+1]

# Equivalent but usually slower contraction: 'htsba,jbc,tcd,kda->thsjk'
# Can just take 2*Re(·) when calculating x + x*
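The refactored loop above transforms each step derivative once (`U_deriv_transformed`) and reuses it for every outer index `g`, instead of rebuilding the triple product inside the nested broadcast. A self-contained numpy check, with purely illustrative shapes rather than the library's actual arrays, that the two contractions agree:

```python
import numpy as np

rng = np.random.default_rng(0)
d, n, n_ctrl = 2, 4, 3   # hypothetical dimensions for illustration only
propagators = (rng.standard_normal((n + 1, d, d))
               + 1j*rng.standard_normal((n + 1, d, d)))
U_deriv = (rng.standard_normal((n_ctrl, n - 1, d, d))
           + 1j*rng.standard_normal((n_ctrl, n - 1, d, d)))

# Old contraction: the sandwich Q_{g'+1}^† dU_{g'} Q_{g'} is rebuilt for
# every outer index g.
old = np.zeros((n_ctrl, n - 1, n, d, d), dtype=complex)
for g in range(n - 1):
    old[:, g, :g + 1] = (propagators[g + 1]
                         @ propagators[1:g + 2].conj().swapaxes(-1, -2)
                         @ U_deriv[:, :g + 1]
                         @ propagators[:g + 1])

# New contraction: transform each step derivative once, reuse it for all g.
U_deriv_transformed = (propagators[1:n].conj().swapaxes(-1, -2)
                       @ U_deriv
                       @ propagators[:n - 1])
new = np.zeros_like(old)
for g in range(n - 1):
    new[:, g, :g + 1] = propagators[g + 1] @ U_deriv_transformed[:, :g + 1]

print(np.allclose(old, new))   # True
```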
@@ -466,8 +467,8 @@ def calculate_derivative_of_control_matrix_from_scratch(

See Also
--------
:func:`_liouville_derivative`
:func:`_control_matrix_at_timestep_derivative`
_liouville_derivative
_control_matrix_at_timestep_derivative
"""
# Distinction between control and drift operators and only
# calculate the derivatives in control direction
74 changes: 39 additions & 35 deletions filter_functions/numeric.py
@@ -444,8 +444,8 @@ def calculate_noise_operators_from_atomic(

See Also
--------
:func:`calculate_noise_operators_from_scratch`: Compute the operators from scratch.
:func:`calculate_control_matrix_from_atomic`: Same calculation in Liouville space.
calculate_noise_operators_from_scratch: Compute the operators from scratch.
calculate_control_matrix_from_atomic: Same calculation in Liouville space.
"""
n = len(noise_operators_atomic)
# Allocate memory
@@ -572,8 +572,8 @@ def calculate_noise_operators_from_scratch(

See Also
--------
:func:`calculate_noise_operators_from_atomic`: Compute the operators from atomic segments.
:func:`calculate_control_matrix_from_scratch`: Same calculation in Liouville space.
calculate_noise_operators_from_atomic: Compute the operators from atomic segments.
calculate_control_matrix_from_scratch: Same calculation in Liouville space.
"""
if t is None:
t = np.concatenate(([0], np.asarray(dt).cumsum()))
@@ -624,7 +624,7 @@ def calculate_noise_operators_from_scratch(
return noise_operators


@util.parse_optional_parameters({'which': ('total', 'correlations')})
@util.parse_optional_parameters(which=('total', 'correlations'))
def calculate_control_matrix_from_atomic(
phases: ndarray,
control_matrix_atomic: ndarray,
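The decorator in the hunk above (and in the analogous hunks below) now takes the allowed values as keyword arguments rather than a dict, which also lets it replace the dropped `parse_which_FF_parameter`. A hedged usage sketch, assuming `parse_optional_parameters` validates the named parameter at call time and raises a `ValueError` for values outside the allowed set:

```python
from filter_functions import util

@util.parse_optional_parameters(which=('total', 'correlations'))
def report(which: str = 'total'):
    return which

print(report(which='correlations'))   # passes validation
try:
    report(which='typo')              # assumed to raise ValueError
except ValueError as err:
    print(err)
```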
@@ -859,7 +859,7 @@ def calculate_control_matrix_from_scratch(

def calculate_control_matrix_periodic(phases: ndarray, control_matrix: ndarray,
total_propagator_liouville: ndarray,
repeats: int) -> ndarray:
repeats: int, check_invertible: bool = True) -> ndarray:
r"""
Calculate the control matrix of a periodic pulse given the phase
factors, control matrix and transfer matrix of the total propagator,
@@ -877,6 +877,11 @@ def calculate_control_matrix_periodic(phases: ndarray, control_matrix: ndarray,
pulse.
repeats: int
The number of repetitions.
check_invertible: bool, optional
Check for all frequencies if the inverse :math:`\mathbb{I} -
e^{i\omega T} \mathcal{Q}^{(1)}` exists by calculating the
determinant. If it does not exist, the sum is evaluated
explicitly for those points. The default is True.

Returns
-------
@@ -909,28 +914,23 @@ def calculate_control_matrix_periodic(phases: ndarray, control_matrix: ndarray,
# evaluate it explicitly.
eye = np.eye(total_propagator_liouville.shape[0])
T = np.multiply.outer(phases, total_propagator_liouville)

# Mask the invertible frequencies. The chosen atol is somewhat empiric.
M = eye - T
M_inv = nla.inv(M)
good_inverse = np.isclose(M_inv @ M, eye, atol=1e-10, rtol=0).all((1, 2))
if check_invertible:
invertible = ~np.isclose(nla.det(M), 0)
else:
invertible = np.array(True)

S = np.empty((*phases.shape, *total_propagator_liouville.shape), dtype=complex)
# Evaluate the sum for invertible frequencies
S[good_inverse] = M_inv[good_inverse] @ (eye - nla.matrix_power(T[good_inverse], repeats))

# Evaluate the sum for non-invertible frequencies
# HINT: Using numba, this could be a factor of ten or so faster. But since
# usually very few omega-values have a bad inverse, the compilation
# overhead is not compensated.
if (~good_inverse).any():
S[~good_inverse] = eye + sum(accumulate(repeat(T[~good_inverse], repeats-1), np.matmul))
# Solve LSE instead of computing inverse, faster + numerically more stable
S[invertible] = nla.solve(M[invertible], eye - nla.matrix_power(T[invertible], repeats))
if (~invertible).any():
S[~invertible] = eye + sum(accumulate(repeat(T[~invertible], repeats-1), np.matmul))

control_matrix_tot = (control_matrix.transpose(2, 0, 1) @ S).transpose(1, 2, 0)
return control_matrix_tot
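Two things change in `calculate_control_matrix_periodic`: the linear system is solved directly instead of multiplying by an explicit inverse, and the new `check_invertible` flag screens singular frequencies via the determinant up front rather than verifying `M_inv @ M ≈ eye` after the fact. A small numpy sketch of both ideas, using toy matrices rather than the library's arrays:

```python
import numpy as np
from numpy import linalg as nla

rng = np.random.default_rng(1)
d = 4
M = np.eye(d) + 0.1*rng.standard_normal((d, d))   # stand-in for eye - T[omega]
B = np.eye(d) - rng.standard_normal((d, d))       # stand-in for the right-hand side

# Solving the linear system matches the explicit-inverse result, with a
# single factorization and better numerical behaviour.
print(np.allclose(nla.inv(M) @ B, nla.solve(M, B)))   # True

# check_invertible=True masks near-singular frequencies by the determinant.
invertible = ~np.isclose(nla.det(M), 0)
print(invertible)                                      # True for this toy matrix
```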


@util.parse_optional_parameters({'which': ('total', 'correlations')})
@util.parse_optional_parameters(which=('total', 'correlations'))
def calculate_cumulant_function(
pulse: 'PulseSequence',
spectrum: Optional[ndarray] = None,
@@ -1096,23 +1096,25 @@ def calculate_cumulant_function(
if d == 2 and pulse.basis.btype in ('Pauli', 'GGM'):
# Single qubit case. Can use simplified expression
cumulant_function = np.zeros(decay_amplitudes.shape, decay_amplitudes.dtype)
diag_mask = np.eye(N, dtype=bool)
diag_mask = np.zeros((N, N), dtype=bool)
diag_mask[1:, 1:] = ~np.eye(N-1, dtype=bool)

# Offdiagonal terms
cumulant_function[..., ~diag_mask] = decay_amplitudes[..., ~diag_mask]
cumulant_function[..., diag_mask] = decay_amplitudes[..., diag_mask]

# Diagonal terms K_ii given by sum over diagonal of Gamma excluding
# Gamma_ii. Since the Pauli basis is traceless, K_00 is zero, therefore
# start at K_11.
diag_idx = deque((True, False, True, True))
diag_deque = deque((False, True, True))
for i in range(1, N):
diag_idx = [False] + list(diag_deque)
cumulant_function[..., i, i] = -decay_amplitudes[..., diag_idx, diag_idx].sum(axis=-1)
# shift the item not summed over by one
diag_idx.rotate()
diag_deque.rotate()

if second_order:
cumulant_function -= frequency_shifts
cumulant_function += frequency_shifts.swapaxes(-1, -2)
cumulant_function[..., 1:, 1:] -= frequency_shifts[..., 1:, 1:]
cumulant_function[..., 1:, 1:] += frequency_shifts[..., 1:, 1:].swapaxes(-1, -2)
else:
# Multi qubit case. Use general expression. Drop imaginary part since
# result is guaranteed to be real (if we didn't do anything wrong)
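For the single-qubit branch above, the new `diag_mask` only touches the traceless block (leaving the identity row and column alone), and the deque bookkeeping selects which diagonal decay amplitudes enter each `K_ii`. A short illustration for N = 4:

```python
import numpy as np
from collections import deque

N = 4  # d**2 for a single qubit with a Pauli or GGM basis
diag_mask = np.zeros((N, N), dtype=bool)
diag_mask[1:, 1:] = ~np.eye(N - 1, dtype=bool)
print(diag_mask.astype(int))
# [[0 0 0 0]
#  [0 0 1 1]
#  [0 1 0 1]
#  [0 1 1 0]]
# Only the off-diagonal entries of the traceless 3x3 block are copied from
# the decay amplitudes; the identity row and column stay zero.

# Each diagonal entry K_ii (i >= 1) sums the decay amplitudes Gamma_jj, j != i:
diag_deque = deque((False, True, True))
for i in range(1, N):
    diag_idx = [False] + list(diag_deque)
    print(i, np.flatnonzero(diag_idx))   # 1 -> [2 3], 2 -> [1 3], 3 -> [1 2]
    diag_deque.rotate()
```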
@@ -1134,7 +1136,7 @@ def calculate_cumulant_function(
return cumulant_function.real


@util.parse_optional_parameters({'which': ('total', 'correlations')})
@util.parse_optional_parameters(which=('total', 'correlations'))
def calculate_decay_amplitudes(
pulse: 'PulseSequence',
spectrum: ndarray,
@@ -1353,7 +1355,7 @@ def calculate_frequency_shifts(
return frequency_shifts


@util.parse_which_FF_parameter
@util.parse_optional_parameters(which=('fidelity', 'generalized'))
def calculate_filter_function(control_matrix: ndarray, which: str = 'fidelity') -> ndarray:
r"""Compute the filter function from the control matrix.

@@ -1524,8 +1526,10 @@ def calculate_second_order_filter_function(
intermediates = dict()

# Work around possibly populated intermediates dict with missing keys
n_opers_transformed = intermediates.get('n_opers_transformed',
_transform_hamiltonian(eigvecs, n_opers, n_coeffs))
n_opers_transformed = intermediates.get('n_opers_transformed')
if n_opers_transformed is None:
n_opers_transformed = _transform_hamiltonian(eigvecs, n_opers, n_coeffs)

try:
basis_transformed_cache = intermediates['basis_transformed']
ctrlmat_step_cache = intermediates['control_matrix_step']
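The rewritten cache lookup in the hunk above avoids `dict.get`'s eagerly evaluated default, which previously ran `_transform_hamiltonian` even on a cache hit. The pattern in isolation, with a hypothetical stand-in for the expensive call:

```python
def expensive_transform():
    print('transforming...')            # visible side effect
    return 'freshly transformed operators'

intermediates = {'n_opers_transformed': 'cached operators'}

# Old pattern: dict.get evaluates its default eagerly, so the transformation
# runs even though the cached value is what gets used.
_ = intermediates.get('n_opers_transformed', expensive_transform())

# New pattern: only compute on a miss (or if the cached value is None).
n_opers_transformed = intermediates.get('n_opers_transformed')
if n_opers_transformed is None:
    n_opers_transformed = expensive_transform()
```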
@@ -1585,7 +1589,7 @@ def calculate_second_order_filter_function(
return result


@util.parse_which_FF_parameter
@util.parse_optional_parameters(which=('fidelity', 'generalized'))
def calculate_pulse_correlation_filter_function(control_matrix: ndarray,
which: str = 'fidelity') -> ndarray:
r"""Compute pulse correlation filter function from control matrix.
@@ -1786,7 +1790,7 @@ def error_transfer_matrix(
For non-Gaussian noise the expression above is perturbative and
includes noise up to order :math:`\xi^2` and hence
:math:`\tilde{\mathcal{U}} = \mathbb{1} + \mathcal{K}(\tau) +
\mathcal{O}(\xi^2)`
\mathcal{O}(\xi^4)`
(although it is evaluated as a matrix exponential in any case).

Given the above expression of the error transfer matrix, the
@@ -1829,7 +1833,7 @@ def error_transfer_matrix(
return error_transfer_matrix


@util.parse_optional_parameters({'which': ('total', 'correlations')})
@util.parse_optional_parameters(which=('total', 'correlations'))
def infidelity(
pulse: 'PulseSequence',
spectrum: Union[Coefficients, Callable],
@@ -2028,12 +2032,12 @@ def infidelity(
else:
raise ValueError("spacing should be either 'linear' or 'log'.")

delta_n = (n_max - n_min)//n_points
delta_n = (n_max - n_min)//(n_points - 1)
n_samples = np.arange(n_min, n_max + delta_n, delta_n)

convergence_infids = np.empty((len(n_samples), len(idx)))
for i, n in enumerate(n_samples):
freqs = xspace(omega_IR, omega_UV, n//2)
freqs = xspace(omega_IR, omega_UV, n)
convergence_infids[i] = infidelity(pulse, spectrum(freqs), freqs,
n_oper_identifiers=n_oper_identifiers,
which='total', show_progressbar=show_progressbar,
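The convergence-test fix in the last hunk divides by `n_points - 1`, so that exactly `n_points` sample counts are generated between `n_min` and `n_max`, and the frequency grid now uses all `n` points rather than `n//2`. The arithmetic with toy numbers:

```python
import numpy as np

n_min, n_max, n_points = 100, 500, 5   # toy values

# Old: delta_n = (n_max - n_min)//n_points -> 80, yielding six sample counts.
# New: n_points samples require n_points - 1 steps between n_min and n_max.
delta_n = (n_max - n_min)//(n_points - 1)
n_samples = np.arange(n_min, n_max + delta_n, delta_n)
print(n_samples)   # [100 200 300 400 500]
```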