diff --git a/pygsti/baseobjs/errorgenbasis.py b/pygsti/baseobjs/errorgenbasis.py index 8f254198e..0c06fa1e3 100644 --- a/pygsti/baseobjs/errorgenbasis.py +++ b/pygsti/baseobjs/errorgenbasis.py @@ -460,6 +460,19 @@ def label_index(self, elemgen_label, ok_if_missing=False): ok_if_missing : bool If True, then returns `None` instead of an integer when the given label is not present. """ + try: + return self.labels.index(elemgen_label) + except ValueError as error: + + if ok_if_missing: + return None + else: + raise error + """ + TODO: 2 qubit labels returning None when label does exist in self.labels. + '(support, left_support) not in self._offsets[eetype]' returning True incorrectly + + support = elemgen_label.sslbls eetype = elemgen_label.errorgen_type bels = elemgen_label.basis_element_labels @@ -490,6 +503,7 @@ def label_index(self, elemgen_label, ok_if_missing=False): raise ValueError("Invalid elementary errorgen type: %s" % str(eetype)) return base + indices[elemgen_label] + """ #@property #def sslbls(self): diff --git a/pygsti/modelmembers/povms/__init__.py b/pygsti/modelmembers/povms/__init__.py index 3fc28cc29..547823519 100644 --- a/pygsti/modelmembers/povms/__init__.py +++ b/pygsti/modelmembers/povms/__init__.py @@ -12,8 +12,10 @@ import _collections import functools as _functools import itertools as _itertools - +import warnings import numpy as _np +import scipy.linalg as _spl +import scipy.optimize as _spo from .complementeffect import ComplementPOVMEffect from .composedeffect import ComposedPOVMEffect @@ -35,6 +37,8 @@ from pygsti.baseobjs import statespace as _statespace from pygsti.tools import basistools as _bt from pygsti.tools import optools as _ot +from pygsti.tools import sum_of_negative_choi_eigenvalues_gate +from pygsti.baseobjs import Basis # Avoid circular import import pygsti.modelmembers as _mm @@ -295,7 +299,7 @@ def povm_type_from_op_type(op_type): return povm_type_preferences -def convert(povm, to_type, basis, ideal_povm=None, flatten_structure=False): +def convert(povm, to_type, basis, cp_penalty=1e-7, ideal_povm=None, flatten_structure=False): """ TODO: update docstring Convert a POVM to a new type of parameterization. @@ -327,6 +331,14 @@ def convert(povm, to_type, basis, ideal_povm=None, flatten_structure=False): are separately converted, leaving the original POVM's structure unchanged. When `True`, composed and embedded operations are "flattened" into a single POVM of the requested `to_type`. + + cp_penalty : float, optional (default 1e-7) + Converting SPAM operations to an error generator representation may + introduce trivial gauge degrees of freedom. These gauge degrees of freedom + are called trivial because they quite literally do not change the dense representation + (i.e. Hilbert-Schmidt vectors) at all. Despite being trivial, error generators along + this trivial gauge orbit may be non-CP, so this cptp penalty is used to favor channels + within this gauge orbit which are CPTP. Returns ------- @@ -334,6 +346,8 @@ def convert(povm, to_type, basis, ideal_povm=None, flatten_structure=False): The converted POVM vector, usually a distinct object from the object passed as input. 
""" + + ##TEST CONVERSION BETWEEN LINBLAD TYPES to_types = to_type if isinstance(to_type, (tuple, list)) else (to_type,) # HACK to support multiple to_type values error_msgs = {} @@ -364,6 +378,7 @@ def convert(povm, to_type, basis, ideal_povm=None, flatten_structure=False): from ..operations import LindbladParameterization as _LindbladParameterization lndtype = _LindbladParameterization.cast(to_type) + #Construct a static "base" POVM if isinstance(povm, ComputationalBasisPOVM): # special easy case base_povm = ComputationalBasisPOVM(povm.state_space.num_qubits, povm.evotype) # just copy it? @@ -375,14 +390,104 @@ def convert(povm, to_type, basis, ideal_povm=None, flatten_structure=False): for lbl, vec in povm.items()] else: raise RuntimeError('Evotype must be compatible with Hilbert ops to use pure effects') - except Exception: # try static mixed states next: - base_items = [(lbl, convert_effect(vec, 'static', basis, idl.get(lbl, None), flatten_structure)) - for lbl, vec in povm.items()] + except RuntimeError: # try static mixed states next: + #if idl.get(lbl,None) is not None: + + base_items = [] + for lbl, vec in povm.items(): + ideal_effect = idl.get(lbl,None) + if ideal_effect is not None: + base_items.append((lbl, convert_effect(ideal_effect, 'static', basis, ideal_effect, flatten_structure))) + else: + base_items.append((lbl, convert_effect(vec, 'static', basis, idl.get(lbl, None), flatten_structure))) base_povm = UnconstrainedPOVM(base_items, povm.evotype, povm.state_space) proj_basis = 'PP' if povm.state_space.is_entirely_qubits else basis errorgen = _LindbladErrorgen.from_error_generator(povm.state_space.dim, lndtype, proj_basis, basis, truncate=True, evotype=povm.evotype) + + #Collect all ideal effects + base_dense_effects = [] + for item in base_items: + dense_effect = item[1].to_dense() + base_dense_effects.append(dense_effect.reshape((1,len(dense_effect)))) + + dense_ideal_povm = _np.concatenate(base_dense_effects, axis=0) + + #Collect all noisy effects + dense_effects = [] + for effect in povm.values(): + dense_effect = effect.to_dense() + dense_effects.append(dense_effect.reshape((1,len(dense_effect)))) + + dense_povm = _np.concatenate(dense_effects, axis=0) + degrees_of_freedom = (dense_ideal_povm.shape[0] - 1) * dense_ideal_povm.shape[1] + + #It is often the case that there are more error generators than physical degrees of freedom in the POVM + #We define a function which finds linear comb. of errgens that span these degrees of freedom. + #This has been called "the trivial gauge", and this function is meant to avoid it + def calc_physical_subspace(dense_ideal_povm, epsilon = 1e-9): + + errgen = _LindbladErrorgen.from_error_generator(povm.state_space.dim, parameterization=to_type) + if degrees_of_freedom > errgen.num_params: + warnings.warn("POVM has more degrees of freedom than the available number of parameters, representation in this parameterization is not guaranteed") + exp_errgen = _ExpErrorgenOp(errgen) + + num_errgens = errgen.num_params + #TODO: Maybe we can use the num of params instead of number of matrix entries, as some of them are linearly dependent. + #i.e E0 completely determines E1 if those are the only two povm elements (E0 + E1 = Identity) + num_entries = dense_ideal_povm.shape[0]*dense_ideal_povm.shape[1] + #assert num_errgens >= povm.num_params, "POVM has too many elements, error generator parameterization is not possible" + + ideal_vec = _np.zeros(num_errgens) + + #Compute the jacobian with respect to the error generators. 
This will allow us to see which + #error generators change the POVM entries + J = _np.zeros((num_entries,num_errgens)) + + for i in range(len(ideal_vec)): + new_vec = ideal_vec.copy() + new_vec[i] = epsilon + exp_errgen.from_vector(new_vec) + vectorized_povm = _np.zeros(num_entries) + perturbed_povm = (dense_ideal_povm @ exp_errgen.to_dense() - dense_ideal_povm)/epsilon + + perturbed_povm_t = perturbed_povm.transpose() + for j, column in enumerate(perturbed_povm_t): + vectorized_povm[j*len(perturbed_povm_t[0]):(j+1)*len(perturbed_povm_t[0])] = column + + J[:,i] = vectorized_povm.transpose() + + _,S,V = _np.linalg.svd(J) + return V[:len(S),] + + phys_directions = calc_physical_subspace(dense_ideal_povm) + + #We use optimization to find the best error generator representation + #we only vary physical directions, not independent error generators + def _objfn(v): + + #For some reason adding the sum_of_negative_choi_eigenvalues_gate term + #resulted in minimize() sometimes choosing NaN values for v. There are + #two stack exchange issues showing this problem with no solution. + if _np.isnan(v).any(): + v = _np.zeros(len(v)) + + L_vec = _np.zeros(len(phys_directions[0])) + for coeff, phys_direction in zip(v,phys_directions): + L_vec += coeff * phys_direction + errorgen.from_vector(L_vec) + proc_matrix = _spl.expm(errorgen.to_dense()) + + return _np.linalg.norm(dense_povm - dense_ideal_povm @ proc_matrix) + cp_penalty * sum_of_negative_choi_eigenvalues_gate(proc_matrix, basis) + + soln = _spo.minimize(_objfn, _np.zeros(len(phys_directions), 'd'), method="Nelder-Mead", options={}, + tol=1e-13) # , callback=callback) + if not soln.success and soln.fun > 1e-6: # not "or" because success is often not set correctly + raise ValueError("Failed to find an errorgen such that <ideal_effect|exp(errorgen) = <effect|") + errgen_vec = _np.linalg.pinv(phys_directions) @ soln.x + errorgen.from_vector(errgen_vec) EffectiveExpErrorgen = _IdentityPlusErrorgenOp if lndtype.meta == '1+' else _ExpErrorgenOp return ComposedPOVM(EffectiveExpErrorgen(errorgen), base_povm, mx_basis=basis) diff --git a/pygsti/modelmembers/states/__init__.py b/pygsti/modelmembers/states/__init__.py #Need to set errorgen so that exp(errorgen)|st> == |state> + dense_st = st.to_dense() dense_state = state.to_dense() - + num_qubits = st.state_space.num_qubits + + errgen = _LindbladErrorgen.from_error_generator(2**(2*num_qubits), parameterization=to_type) + num_errgens = errgen.num_params + + #GLND for states suffers from "trivial gauge" freedom. This function identifies + #the physical directions to avoid this gauge. + def calc_physical_subspace(ideal_prep, epsilon = 1e-9): + + exp_errgen = _ExpErrorgenOp(errgen) + ideal_vec = _np.zeros(num_errgens) + + #Compute the jacobian with respect to the error generators. 
This will allow us to see which + #error generators change the state preparation + J = _np.zeros((state.num_params, num_errgens)) + + for i in range(len(ideal_vec)): + new_vec = ideal_vec.copy() + new_vec[i] = epsilon + exp_errgen.from_vector(new_vec) + J[:,i] = (exp_errgen.to_dense() @ ideal_prep - ideal_prep)[1:]/epsilon + + _,S,V = _np.linalg.svd(J) + return V[:len(S),] + + phys_directions = calc_physical_subspace(dense_state) + + #We use optimization to find the best error generator representation + #we only vary physical directions, not independent error generators def _objfn(v): - errorgen.from_vector(v) - return _np.linalg.norm(_spl.expm(errorgen.to_dense()) @ dense_st - dense_state) - #def callback(x): print("callbk: ",_np.linalg.norm(x),_objfn(x)) # REMOVE - soln = _spo.minimize(_objfn, _np.zeros(errorgen.num_params, 'd'), method="CG", options={}, - tol=1e-8) # , callback=callback) + L_vec = _np.zeros(len(phys_directions[0])) + for coeff, phys_direction in zip(v,phys_directions): + L_vec += coeff * phys_direction + errorgen.from_vector(L_vec) + proc_matrix = _spl.expm(errorgen.to_dense()) + return _np.linalg.norm(proc_matrix @ dense_st - dense_state) + cp_penalty * sum_of_negative_choi_eigenvalues_gate(proc_matrix, basis) + + soln = _spo.minimize(_objfn, _np.zeros(len(phys_directions), 'd'), method="Nelder-Mead", options={}, + tol=1e-13) # , callback=callback) #print("DEBUG: opt done: ",soln.success, soln.fun, soln.x) # REMOVE if not soln.success and soln.fun > 1e-6: # not "or" because success is often not set correctly raise ValueError("Failed to find an errorgen such that exp(errorgen)|ideal> = |state>") - errorgen.from_vector(soln.x) + + errgen_vec = _np.linalg.pinv(phys_directions) @ soln.x + errorgen.from_vector(errgen_vec) EffectiveExpErrorgen = _IdentityPlusErrorgenOp if lndtype.meta == '1+' else _ExpErrorgenOp return ComposedState(st, EffectiveExpErrorgen(errorgen)) diff --git a/pygsti/models/explicitmodel.py b/pygsti/models/explicitmodel.py index 2faa9c955..584754853 100644 --- a/pygsti/models/explicitmodel.py +++ b/pygsti/models/explicitmodel.py @@ -340,7 +340,7 @@ def __getitem__(self, label): raise KeyError("Key %s has an invalid prefix" % label) def convert_members_inplace(self, to_type, categories_to_convert='all', labels_to_convert='all', - ideal_model=None, flatten_structure=False, set_default_gauge_group=False, cptp_truncation_tol= 1e-6): + ideal_model=None, flatten_structure=False, set_default_gauge_group=False, cptp_truncation_tol= 1e-7, spam_cp_penalty = 1e-7): """ TODO: docstring -- like set_all_parameterizations but doesn't set default gauge group by default """ @@ -359,12 +359,12 @@ def convert_members_inplace(self, to_type, categories_to_convert='all', labels_t for lbl, prep in self.preps.items(): if labels_to_convert == 'all' or lbl in labels_to_convert: ideal = ideal_model.preps.get(lbl, None) if (ideal_model is not None) else None - self.preps[lbl] = _state.convert(prep, to_type, self.basis, ideal, flatten_structure) + self.preps[lbl] = _state.convert(prep, to_type, self.basis, spam_cp_penalty, ideal, flatten_structure) if any([c in categories_to_convert for c in ('all', 'povms')]): for lbl, povm in self.povms.items(): if labels_to_convert == 'all' or lbl in labels_to_convert: ideal = ideal_model.povms.get(lbl, None) if (ideal_model is not None) else None - self.povms[lbl] = _povm.convert(povm, to_type, self.basis, ideal, flatten_structure) + self.povms[lbl] = _povm.convert(povm, to_type, self.basis, spam_cp_penalty, ideal, flatten_structure) 
self._clean_paramvec() # param indices were probably updated if set_default_gauge_group: @@ -382,7 +382,7 @@ def set_default_gauge_group_for_member_type(self, member_type): self.default_gauge_group = _gg.TrivialGaugeGroup(self.state_space) def set_all_parameterizations(self, gate_type, prep_type="auto", povm_type="auto", - instrument_type="auto", ideal_model=None, cptp_truncation_tol = 1e-6): + instrument_type="auto", ideal_model=None, cptp_truncation_tol = 1e-6, spam_cp_penalty = 1e-7): """ Convert all gates, states, and POVMs to a specific parameterization type. @@ -424,6 +424,13 @@ def set_all_parameterizations(self, gate_type, prep_type="auto", povm_type="auto indicates the maximum amount of truncation induced deviation from the original operations (measured by frobenius distance) we're willing to accept without marking the conversion as failed. + spam_cp_penalty : float, optional (default 1e-7) + Converting SPAM operations to an error generator representation may + introduce trivial gauge degrees of freedom. These gauge degrees of freedom + are called trivial because they quite literally do not change the dense representation + (i.e. Hilbert-Schmidt vectors) at all. Despite being trivial, error generators along + this trivial gauge orbit may be non-CP, so this cptp penalty is used to favor channels + within this gauge orbit which are CPTP. Returns ------- @@ -444,8 +451,8 @@ def set_all_parameterizations(self, gate_type, prep_type="auto", povm_type="auto try: self.convert_members_inplace(typ, 'operations', 'all', flatten_structure=True, ideal_model=static_model, cptp_truncation_tol = cptp_truncation_tol) self.convert_members_inplace(ityp, 'instruments', 'all', flatten_structure=True, ideal_model=static_model, cptp_truncation_tol = cptp_truncation_tol) - self.convert_members_inplace(rtyp, 'preps', 'all', flatten_structure=True, ideal_model=static_model, cptp_truncation_tol = cptp_truncation_tol) - self.convert_members_inplace(povmtyp, 'povms', 'all', flatten_structure=True, ideal_model=static_model, cptp_truncation_tol = cptp_truncation_tol) + self.convert_members_inplace(rtyp, 'preps', 'all', flatten_structure=True, ideal_model=static_model, cptp_truncation_tol = cptp_truncation_tol, spam_cp_penalty = spam_cp_penalty) + self.convert_members_inplace(povmtyp, 'povms', 'all', flatten_structure=True, ideal_model=static_model, cptp_truncation_tol = cptp_truncation_tol, spam_cp_penalty = spam_cp_penalty) except ValueError as e: raise ValueError("Failed to convert members. If converting to CPTP-based models, " + "try providing an ideal_model to avoid possible branch cuts.") from e diff --git a/pygsti/models/modelparaminterposer.py b/pygsti/models/modelparaminterposer.py index aa86fc5f3..0fbefb341 100644 --- a/pygsti/models/modelparaminterposer.py +++ b/pygsti/models/modelparaminterposer.py @@ -66,7 +66,7 @@ def __init__(self, transform_matrix): self.transform_matrix = transform_matrix # cols specify a model parameter in terms of op params. 
self.inv_transform_matrix = _np.linalg.pinv(self.transform_matrix) super().__init__(transform_matrix.shape[1], transform_matrix.shape[0]) - + def model_paramvec_to_ops_paramvec(self, v): return _np.dot(self.transform_matrix, v) diff --git a/pygsti/tools/jamiolkowski.py b/pygsti/tools/jamiolkowski.py index cafabb2d9..b631988e1 100644 --- a/pygsti/tools/jamiolkowski.py +++ b/pygsti/tools/jamiolkowski.py @@ -273,6 +273,27 @@ def fast_jamiolkowski_iso_std_inv(choi_mx, op_mx_basis): #transform operation matrix into appropriate basis return _bt.change_basis(opMxInStdBasis, op_mx_basis.create_equivalent('std'), op_mx_basis) +def sum_of_negative_choi_eigenvalues_gate(op_mx, op_mx_basis): + """ + Compute the sum of the negative Choi eigenvalues of a process matrix. + + Parameters + ---------- + op_mx : np.array + The process (superoperator) matrix whose Choi spectrum is evaluated. + + op_mx_basis : Basis + The basis of op_mx. + + Returns + ------- + float + the sum of the negative eigenvalues of the Choi representation of op_mx + """ + sumOfNeg = 0 + J = fast_jamiolkowski_iso_std(op_mx, op_mx_basis) # Choi mx basis doesn't matter + evals = _np.linalg.eigvals(J) # could use eigvalsh, but wary of this since eigh can be wrong... + for ev in evals: + if ev.real < 0: sumOfNeg -= ev.real + return sumOfNeg def sum_of_negative_choi_eigenvalues(model, weights=None): """ diff --git a/pygsti/tools/optools.py b/pygsti/tools/optools.py index 8d68a73ff..fa08c173f 100644 --- a/pygsti/tools/optools.py +++ b/pygsti/tools/optools.py @@ -306,9 +306,6 @@ def diamonddist(a, b, mx_basis='pp', return_x=False): # currently cvxpy is only needed for this function, so don't import until here import cvxpy as _cvxpy - old_cvxpy = bool(tuple(map(int, _cvxpy.__version__.split('.'))) < (1, 0)) - if old_cvxpy: - raise RuntimeError('CVXPY 0.4 is no longer supported. Please upgrade to CVXPY 1.0 or higher.') # _jam code below assumes *un-normalized* Jamiol-isomorphism. # It will convert a & b to a "single-block" basis representation diff --git a/test/unit/objects/test_model.py b/test/unit/objects/test_model.py index fdad0a22c..8c537e93e 100644 --- a/test/unit/objects/test_model.py +++ b/test/unit/objects/test_model.py @@ -66,12 +66,12 @@ def test_construction(self): self.assertEqual(list(self.model.povms.keys()), ["Mdefault"]) # Test default prep/effects - self.assertArraysAlmostEqual(self.model.prep, self.model.preps["rho0"]) + self.assertArraysAlmostEqual(self.model.prep.to_dense(), self.model.preps["rho0"].to_dense()) self.assertEqual(set(self.model.effects.keys()), set(['0', '1'])) self.assertTrue(isinstance(self.model['Gi'], LinearOperator)) - +#TODO: Add tests for conversions between more combinations of parameterizations class FullModelBase(ModelBase): """Base class for test cases using a full-parameterized model""" build_options = {'gate_type': 'full'} @@ -86,7 +86,9 @@ class StaticModelBase(ModelBase): """Base class for test cases using a static-parameterized model""" build_options = {'gate_type': 'static'} - +class GLNDModelBase(ModelBase): + """Base class for test cases using a GLND (Lindblad)-parameterized model""" + build_options = {'gate_type': 'GLND'} ## # Method base classes, controlling which methods will be tested # @@ -95,10 +97,25 @@ def _assert_model_params(self, nOperations, nSPVecs, nEVecs, nParamsPerGate, nPa nParams = nOperations * nParamsPerGate + nSPVecs * nParamsPerSP + nEVecs * 4 print("num params = ", self.model.num_params) self.assertEqual(self.model.num_params, nParams) - # TODO does this actually assert correctness? 
+ + def _assert_model_ops(self, oldModel): + #test operations + for (_, gate),(_,gate2) in zip(self.model.operations.items(), oldModel.operations.items() ): + assert np.allclose(gate.to_dense(), gate2.to_dense()), "Discrepancy in operation process matrices when converting parameterizations" + + def _assert_model_SPAM(self, oldModel): + for (_, povm1), (_, povm2) in zip(self.model.povms.items(), oldModel.povms.items()): + for element1, element2 in zip(povm1.items(), povm2.items()): + assert np.allclose(element1[1].to_dense(), element2[1].to_dense()), "Discrepancy in POVM superbra when converting parameterizations" + + for (_, prep1), (_, prep2) in zip(self.model.preps.items(), oldModel.preps.items()): + assert np.allclose(prep1.to_dense(), prep2.to_dense()), "Discrepancy in state prep superket when converting parameterizations" def test_set_all_parameterizations_full(self): + model_copy = self.model.copy() self.model.set_all_parameterizations("full") + self._assert_model_ops(model_copy) + self._assert_model_SPAM(model_copy) self._assert_model_params( nOperations=3, nSPVecs=1, @@ -108,7 +125,10 @@ def test_set_all_parameterizations_full(self): ) def test_set_all_parameterizations_TP(self): + model_copy = self.model.copy() self.model.set_all_parameterizations("full TP") + self._assert_model_ops(model_copy) + self._assert_model_SPAM(model_copy) self._assert_model_params( nOperations=3, nSPVecs=1, @@ -118,7 +138,10 @@ def test_set_all_parameterizations_TP(self): ) def test_set_all_parameterizations_static(self): + model_copy = self.model.copy() self.model.set_all_parameterizations("static") + self._assert_model_ops(model_copy) + self._assert_model_SPAM(model_copy) self._assert_model_params( nOperations=0, nSPVecs=0, @@ -128,21 +151,25 @@ def test_set_all_parameterizations_static(self): ) def test_set_all_parameterizations_HS(self): + model_copy = self.model.copy() self.model.set_all_parameterizations("H+S") - self._assert_model_params( - nOperations=3, - nSPVecs=2, - nEVecs=0, - nParamsPerGate=6, - nParamsPerSP=6 - ) + self._assert_model_ops(model_copy) + self._assert_model_SPAM(model_copy) + assert self.model.num_params == 6 * (3 + 1 + 1) + + def test_set_all_parameterizations_GLND(self): + model_copy = self.model.copy() + self.model.set_all_parameterizations("GLND") + self._assert_model_ops(model_copy) + self._assert_model_SPAM(model_copy) + assert self.model.num_params == 12 * (3 + 1 + 1) def test_element_accessors(self): # XXX what does this test cover and is it useful? EGN: covers the __getitem__/__setitem__ functions of model v = np.array([[1.0 / np.sqrt(2)], [0], [0], [1.0 / np.sqrt(2)]], 'd') self.model['rho1'] = v w = self.model['rho1'] - self.assertArraysAlmostEqual(w, v) + self.assertArraysAlmostEqual(w.to_dense(), v.T) del self.model.preps['rho1'] # TODO assert correctness @@ -152,17 +179,6 @@ def test_element_accessors(self): Iz2 = self.model["Iz"] # get an instrument # TODO assert correctness if needed (can the underlying model even mutate this?) 
- def test_set_operation_matrix(self): - # TODO no random - Gi_test_matrix = np.random.random((4, 4)) - Gi_test_matrix[0, :] = [1, 0, 0, 0] # so TP mode works - self.model["Gi"] = Gi_test_matrix # set operation matrix - self.assertArraysAlmostEqual(self.model['Gi'], Gi_test_matrix) - - Gi_test_dense_op = FullArbitraryOp(Gi_test_matrix) - self.model["Gi"] = Gi_test_dense_op # set gate object - self.assertArraysAlmostEqual(self.model['Gi'], Gi_test_matrix) - def test_strdiff(self): other = models.create_explicit_model_from_expressions( [('Q0',)], ['Gi', 'Gx', 'Gy'], @@ -327,14 +343,14 @@ class ThresholdMethodBase(object): def test_product(self): circuit = ('Gx', 'Gy') - p1 = np.dot(self.model['Gy'], self.model['Gx']) + p1 = np.dot(self.model['Gy'].to_dense(), self.model['Gx'].to_dense()) p2 = self.model.sim.product(circuit, scale=False) p3, scale = self.model.sim.product(circuit, scale=True) self.assertArraysAlmostEqual(p1, p2) self.assertArraysAlmostEqual(p1, scale * p3) circuit = ('Gx', 'Gy', 'Gy') - p1 = np.dot(self.model['Gy'], np.dot(self.model['Gy'], self.model['Gx'])) + p1 = np.dot(self.model['Gy'].to_dense(), np.dot(self.model['Gy'].to_dense(), self.model['Gx'].to_dense())) p2 = self.model.sim.product(circuit, scale=False) p3, scale = self.model.sim.product(circuit, scale=True) self.assertArraysAlmostEqual(p1, p2) @@ -345,8 +361,8 @@ def test_bulk_product(self): gatestring2 = ('Gx', 'Gy', 'Gy') circuits = [gatestring1, gatestring2] - p1 = np.dot(self.model['Gy'], self.model['Gx']) - p2 = np.dot(self.model['Gy'], np.dot(self.model['Gy'], self.model['Gx'])) + p1 = np.dot(self.model['Gy'].to_dense(), self.model['Gx'].to_dense()) + p2 = np.dot(self.model['Gy'].to_dense(), np.dot(self.model['Gy'].to_dense(), self.model['Gx'].to_dense())) bulk_prods = self.model.sim.bulk_product(circuits) bulk_prods_scaled, scaleVals = self.model.sim.bulk_product(circuits, scale=True) @@ -398,15 +414,15 @@ def setUpClass(cls): cls.gatestring1 = ('Gx', 'Gy') cls.gatestring2 = ('Gx', 'Gy', 'Gy') cls._expected_probs = { - cls.gatestring1: np.dot(np.transpose(cls._model.povms['Mdefault']['0']), - np.dot(cls._model['Gy'], - np.dot(cls._model['Gx'], - cls._model.preps['rho0']))).reshape(-1)[0], - cls.gatestring2: np.dot(np.transpose(cls._model.povms['Mdefault']['0']), - np.dot(cls._model['Gy'], - np.dot(cls._model['Gy'], - np.dot(cls._model['Gx'], - cls._model.preps['rho0'])))).reshape(-1)[0] + cls.gatestring1: np.dot(np.transpose(cls._model.povms['Mdefault']['0'].to_dense()), + np.dot(cls._model['Gy'].to_dense(), + np.dot(cls._model['Gx'].to_dense(), + cls._model.preps['rho0'].to_dense()))).reshape(-1)[0], + cls.gatestring2: np.dot(np.transpose(cls._model.povms['Mdefault']['0'].to_dense()), + np.dot(cls._model['Gy'].to_dense(), + np.dot(cls._model['Gy'].to_dense(), + np.dot(cls._model['Gx'].to_dense(), + cls._model.preps['rho0'].to_dense())))).reshape(-1)[0] } # TODO expected dprobs & hprobs @@ -696,6 +712,16 @@ def test_probs_warns_on_nan_in_input(self): class TPModelTester(TPModelBase, StandardMethodBase, BaseCase): def test_tp_dist(self): self.assertAlmostEqual(self.model._tpdist(), 3.52633900335e-16, 5) + def test_set_operation_matrix(self): + # TODO no random + Gi_test_matrix = np.random.random((4, 4)) + Gi_test_matrix[0, :] = [1, 0, 0, 0] # so TP mode works + self.model["Gi"] = Gi_test_matrix # set operation matrix + self.assertArraysAlmostEqual(self.model['Gi'], Gi_test_matrix) + + Gi_test_dense_op = FullArbitraryOp(Gi_test_matrix) + self.model["Gi"] = Gi_test_dense_op # set gate object + 
self.assertArraysAlmostEqual(self.model['Gi'], Gi_test_matrix) class StaticModelTester(StaticModelBase, StandardMethodBase, BaseCase): @@ -715,7 +741,8 @@ def test_bulk_fill_hprobs_with_high_smallness_threshold(self): def test_iter_hprobs_by_rectangle(self): self.skipTest("TODO should probably warn user?") - +class LindbladModelTester(GLNDModelBase, StandardMethodBase, BaseCase): + pass class FullMapSimMethodTester(FullModelBase, SimMethodBase, BaseCase): def setUp(self): super(FullMapSimMethodTester, self).setUp()
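As a usage sketch (not part of the diff above), the snippet below exercises the new GLND SPAM conversion path end to end. It assumes the standard single-qubit explicit model used by this PR's tests; the gate expressions and the 1e-6 tolerance are illustrative choices, not values taken from the diff.

import numpy as np
from pygsti import models

# Build a perfect single-qubit model with the default "rho0" prep and "Mdefault" POVM.
model = models.create_explicit_model_from_expressions(
    [('Q0',)], ['Gi', 'Gx', 'Gy'],
    ["I(Q0)", "X(pi/2,Q0)", "Y(pi/2,Q0)"])
reference = model.copy()

# Convert every member to the GLND (Lindblad) error-generator parameterization.
# spam_cp_penalty biases the SPAM error-generator fit toward CP channels within
# the trivial gauge orbit, as described in the docstrings added in this PR.
model.set_all_parameterizations("GLND", spam_cp_penalty=1e-7)

# The conversion should leave the dense (Hilbert-Schmidt) representations unchanged,
# mirroring the new _assert_model_ops / _assert_model_SPAM test helpers.
for lbl, prep in model.preps.items():
    assert np.allclose(prep.to_dense(), reference.preps[lbl].to_dense(), atol=1e-6)
for lbl, povm in model.povms.items():
    for elbl, effect in povm.items():
        assert np.allclose(effect.to_dense(), reference.povms[lbl][elbl].to_dense(), atol=1e-6)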