From 781301294ede97548908ca764aadfe5fd2ccc12b Mon Sep 17 00:00:00 2001 From: Lukas Date: Fri, 13 Feb 2026 15:35:23 +0100 Subject: [PATCH 01/25] Added MPO.from_matrix() and tests. --- src/mqt/yaqs/core/data_structures/networks.py | 332 +++++++++++++++--- tests/core/data_structures/test_networks.py | 18 + 2 files changed, 294 insertions(+), 56 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 6ebf5a4a..986b540d 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -174,7 +174,9 @@ def __init__( vector[0] = rng.random() vector[1] = 1 - vector[0] elif state == "basis": - assert basis_string is not None, "basis_string must be provided for 'basis' state initialization." + assert ( + basis_string is not None + ), "basis_string must be provided for 'basis' state initialization." self.init_mps_from_basis(basis_string, self.physical_dimensions) break else: @@ -191,7 +193,9 @@ def __init__( if pad is not None: self.pad_bond_dimension(pad) - def init_mps_from_basis(self, basis_string: str, physical_dimensions: list[int]) -> None: + def init_mps_from_basis( + self, basis_string: str, physical_dimensions: list[int] + ) -> None: """Initialize a list of MPS tensors representing a product state from a basis string. Args: @@ -350,8 +354,12 @@ def get_schmidt_spectrum(self, sites: list[int]) -> NDArray[np.float64]: NDArray[np.float64]: The Schmidt spectrum (length 500), with unused entries filled with NaN. """ - assert len(sites) == 2, "Schmidt spectrum is defined on a bond (two adjacent sites)." - assert sites[0] + 1 == sites[1], "Schmidt spectrum only defined for nearest-neighbor cut." + assert ( + len(sites) == 2 + ), "Schmidt spectrum is defined on a bond (two adjacent sites)." + assert ( + sites[0] + 1 == sites[1] + ), "Schmidt spectrum only defined for nearest-neighbor cut." top_schmidt_vals = 500 i, j = sites a, b = self.tensors[i], self.tensors[j] @@ -407,7 +415,9 @@ def almost_equal(self, other: MPS) -> bool: return False return True - def shift_orthogonality_center_right(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: + def shift_orthogonality_center_right( + self, current_orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Shifts orthogonality center right. This function performs a QR decomposition to shift the known current center to the right and move @@ -441,7 +451,9 @@ def shift_orthogonality_center_right(self, current_orthogonality_center: int, de self.tensors[current_orthogonality_center + 1], ) = (a_new, b_new) - def shift_orthogonality_center_left(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: + def shift_orthogonality_center_left( + self, current_orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Shifts orthogonality center left. This function flips the network, performs a right shift, then flips the network again. @@ -452,10 +464,14 @@ def shift_orthogonality_center_left(self, current_orthogonality_center: int, dec Default is QR. 
""" self.flip_network() - self.shift_orthogonality_center_right(self.length - current_orthogonality_center - 1, decomposition) + self.shift_orthogonality_center_right( + self.length - current_orthogonality_center - 1, decomposition + ) self.flip_network() - def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR") -> None: + def set_canonical_form( + self, orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Sets canonical form of MPS. Left and right normalizes an MPS around a selected site. @@ -466,7 +482,9 @@ def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR decomposition: Type of decomposition. Default QR. """ - def sweep_decomposition(orthogonality_center: int, decomposition: str = "QR") -> None: + def sweep_decomposition( + orthogonality_center: int, decomposition: str = "QR" + ) -> None: for site, _ in enumerate(self.tensors): if site == orthogonality_center: break @@ -497,13 +515,17 @@ def normalize(self, form: str = "B", decomposition: str = "QR") -> None: if form == "B": self.flip_network() - self.set_canonical_form(orthogonality_center=self.length - 1, decomposition=decomposition) + self.set_canonical_form( + orthogonality_center=self.length - 1, decomposition=decomposition + ) self.shift_orthogonality_center_right(self.length - 1, decomposition) if form == "B": self.flip_network() - def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> None: + def truncate( + self, threshold: float = 1e-12, max_bond_dim: int | None = None + ) -> None: """In-place MPS truncation via repeated two-site SVDs.""" orth_center = self.check_canonical_form()[0] if self.length == 1: @@ -525,7 +547,9 @@ def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> self.flip_network() - def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np.complex128: + def scalar_product( + self, other: MPS, sites: int | list[int] | None = None + ) -> np.complex128: """Compute the scalar (inner) product between two Matrix Product States (MPS). The function contracts the corresponding tensors of two MPS objects. If no specific site is @@ -552,8 +576,12 @@ def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np result = None for idx in range(self.length): # contract at each site into a 4-leg tensor - theta = oe.contract("abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx]) - result = theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) + theta = oe.contract( + "abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx] + ) + result = ( + theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) + ) # squeeze down to scalar assert result is not None return np.complex128(np.squeeze(result)) @@ -585,7 +613,9 @@ def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np msg = f"Invalid `sites` argument: {sites!r}" raise ValueError(msg) - def local_expect(self, operator: Observable, sites: int | list[int]) -> np.complex128: + def local_expect( + self, operator: Observable, sites: int | list[int] + ) -> np.complex128: """Compute the local expectation value of an operator on an MPS. 
The function applies the given operator to the tensor at the specified site of a deep copy of the @@ -611,11 +641,17 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl i = sites if isinstance(operator.sites, list): - assert operator.sites[0] == i, f"Operator sites mismatch {operator.sites[0]}, {i}" + assert ( + operator.sites[0] == i + ), f"Operator sites mismatch {operator.sites[0]}, {i}" elif isinstance(operator.sites, int): - assert operator.sites == i, f"Operator sites mismatch {operator.sites}, {i}" + assert ( + operator.sites == i + ), f"Operator sites mismatch {operator.sites}, {i}" - assert i is not None, f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" + assert ( + i is not None + ), f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" a = temp_state.tensors[i] temp_state.tensors[i] = oe.contract("ab, bcd->acd", operator.gate.matrix, a) @@ -626,10 +662,12 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl assert operator.sites[0] == i, "Observable sites mismatch" assert operator.sites[1] == j, "Observable sites mismatch" - assert operator.sites[0] < operator.sites[1], "Observable sites must be in ascending order." - assert operator.sites[1] - operator.sites[0] == 1, ( - "Only nearest-neighbor observables are currently implemented." - ) + assert ( + operator.sites[0] < operator.sites[1] + ), "Observable sites must be in ascending order." + assert ( + operator.sites[1] - operator.sites[0] == 1 + ), "Only nearest-neighbor observables are currently implemented." a = temp_state.tensors[i] b = temp_state.tensors[j] d_i, left, _ = a.shape @@ -641,7 +679,9 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl theta = theta.reshape(left, d_i * d_j, right) # (l, d_i*d_j, r) # 2) apply operator on the combined phys index - theta = oe.contract("ab, cbd->cad", operator.gate.matrix, theta) # (l, d_i*d_j, r) + theta = oe.contract( + "ab, cbd->cad", operator.gate.matrix, theta + ) # (l, d_i*d_j, r) theta = theta.reshape(left, d_i, d_j, right) # back to (l, d_i, d_j, r) # 3) split via SVD @@ -654,7 +694,9 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl u_tensor = u_mat.reshape(left, d_i, chi_new) # (l, d_i, r_new) a_new = u_tensor.transpose(1, 0, 2) # → (d_i, l, r_new) - v_tensor = (np.diag(s_vec) @ v_mat).reshape(chi_new, d_j, right) # (l_new, d_j, r) + v_tensor = (np.diag(s_vec) @ v_mat).reshape( + chi_new, d_j, right + ) # (l_new, d_j, r) b_new = v_tensor.transpose(1, 0, 2) # → (d_j, l_new, r) temp_state.tensors[i] = a_new @@ -690,24 +732,44 @@ def evaluate_observables( elif observable.gate.name == "total_bond": results[obs_index, column_index] = self.get_total_bond() elif observable.gate.name in {"entropy", "schmidt_spectrum"}: - assert isinstance(observable.sites, list), "Given metric requires a list of sites" - assert len(observable.sites) == 2, "Given metric requires 2 sites to act on." + assert isinstance( + observable.sites, list + ), "Given metric requires a list of sites" + assert ( + len(observable.sites) == 2 + ), "Given metric requires 2 sites to act on." max_site = max(observable.sites) min_site = min(observable.sites) - assert max_site - min_site == 1, "Entropy and Schmidt cuts must be nearest neighbor." + assert ( + max_site - min_site == 1 + ), "Entropy and Schmidt cuts must be nearest neighbor." 
for s in observable.sites: - assert s in range(self.length), f"Observable acting on non-existing site: {s}" + assert s in range( + self.length + ), f"Observable acting on non-existing site: {s}" if observable.gate.name == "entropy": - results[obs_index, column_index] = self.get_entropy(observable.sites) + results[obs_index, column_index] = self.get_entropy( + observable.sites + ) elif observable.gate.name == "schmidt_spectrum": - results[obs_index, column_index] = self.get_schmidt_spectrum(observable.sites) + results[obs_index, column_index] = self.get_schmidt_spectrum( + observable.sites + ) elif observable.gate.name == "pvm": - assert hasattr(observable.gate, "bitstring"), "Gate does not have attribute bitstring." - results[obs_index, column_index] = self.project_onto_bitstring(observable.gate.bitstring) + assert hasattr( + observable.gate, "bitstring" + ), "Gate does not have attribute bitstring." + results[obs_index, column_index] = self.project_onto_bitstring( + observable.gate.bitstring + ) else: - idx = observable.sites[0] if isinstance(observable.sites, list) else observable.sites + idx = ( + observable.sites[0] + if isinstance(observable.sites, list) + else observable.sites + ) if idx > last_site: for site in range(last_site, idx): temp_state.shift_orthogonality_center_right(site) @@ -732,16 +794,24 @@ def expect(self, observable: Observable) -> np.float64: elif isinstance(observable.sites, list): sites_list = observable.sites - assert sites_list is not None, f"Invalid type in expect {type(observable.sites).__name__}" + assert ( + sites_list is not None + ), f"Invalid type in expect {type(observable.sites).__name__}" - assert len(sites_list) < 3, "Only one- and two-site observables are currently implemented." + assert ( + len(sites_list) < 3 + ), "Only one- and two-site observables are currently implemented." for s in sites_list: - assert s in range(self.length), f"Observable acting on non-existing site: {s}" + assert s in range( + self.length + ), f"Observable acting on non-existing site: {s}" exp = self.local_expect(observable, sites_list) - assert exp.imag < 1e-13, f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." + assert ( + exp.imag < 1e-13 + ), f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." 
return exp.real def measure_single_shot(self) -> int: @@ -758,7 +828,9 @@ def measure_single_shot(self) -> int: temp_state = copy.deepcopy(self) bitstring = [] for site, tensor in enumerate(temp_state.tensors): - reduced_density_matrix = oe.contract("abc, dbc->ad", tensor, np.conj(tensor)) + reduced_density_matrix = oe.contract( + "abc, dbc->ad", tensor, np.conj(tensor) + ) probabilities = np.diag(reduced_density_matrix).real rng = np.random.default_rng() chosen_index = rng.choice(len(probabilities), p=probabilities) @@ -772,7 +844,9 @@ def measure_single_shot(self) -> int: temp_state.tensors[site + 1] = ( # noqa: B909 1 / np.sqrt(probabilities[chosen_index]) - * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) + * oe.contract( + "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] + ) ) return sum(c << i for i, c in enumerate(bitstring)) @@ -797,10 +871,14 @@ def measure_shots(self, shots: int) -> dict[int, int]: if shots > 1: max_workers = max(1, multiprocessing.cpu_count() - 1) with ( - concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor, + concurrent.futures.ProcessPoolExecutor( + max_workers=max_workers + ) as executor, tqdm(total=shots, desc="Measuring shots", ncols=80) as pbar, ): - futures = [executor.submit(self.measure_single_shot) for _ in range(shots)] + futures = [ + executor.submit(self.measure_single_shot) for _ in range(shots) + ] for future in concurrent.futures.as_completed(futures): result = future.result() results[result] = results.get(result, 0) + 1 @@ -824,7 +902,9 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: Returns: float: Probability of obtaining the given bitstring under projective measurement. """ - assert len(bitstring) == self.length, "Bitstring length must match number of sites" + assert ( + len(bitstring) == self.length + ), "Bitstring length must match number of sites" temp_state = copy.deepcopy(self) total_norm = 1.0 @@ -832,7 +912,9 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: state_index = int(char) tensor = temp_state.tensors[site] local_dim = self.physical_dimensions[site] - assert 0 <= state_index < local_dim, f"Invalid state index {state_index} at site {site}" + assert ( + 0 <= state_index < local_dim + ), f"Invalid state index {state_index} at site {site}" selected_state = np.zeros(local_dim) selected_state[state_index] = 1 @@ -849,7 +931,11 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: # Normalize and propagate if site != self.length - 1: temp_state.tensors[site + 1] = ( - 1 / norm * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) + 1 + / norm + * oe.contract( + "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] + ) ) return np.complex128(total_norm**2) @@ -1366,13 +1452,20 @@ def bose_hubbard( tensor[3, 3] = id_boson # build the full tensor list - tensors = [np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) for _ in range(length)] + tensors = [ + np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) + for _ in range(length) + ] # Left boundary: take only row 0 - tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype(np.complex128) + tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype( + np.complex128 + ) # Right boundary: take only col 3 - tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype(np.complex128) + tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype( 
+ np.complex128 + ) mpo = cls() mpo.tensors = tensors @@ -1430,7 +1523,9 @@ def finite_state_machine( self.length = len(self.tensors) self.physical_dimension = self.tensors[0].shape[0] - def custom(self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True) -> None: + def custom( + self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True + ) -> None: """Custom MPO from tensors. Initialize the custom MPO (Matrix Product Operator) with the given tensors. @@ -1498,7 +1593,9 @@ def from_pauli_sum( self.physical_dimension = physical_dimension if not terms: - self.tensors = [np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length)] + self.tensors = [ + np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length) + ] return # 1. Parse terms into dense lists of operator names. @@ -1532,7 +1629,9 @@ def from_pauli_sum( term_trajectories[t_idx][length] = 0 # bond_state_maps[i] stores the mapping: (Op_str, Next_State_ID) -> Current_State_ID - bond_state_maps: list[dict[tuple[str, int], int]] = [{} for _ in range(length + 1)] + bond_state_maps: list[dict[tuple[str, int], int]] = [ + {} for _ in range(length + 1) + ] # Sweep Right-to-Left (sites L-1 down to 1) to build the FSM transitions. # We stop at bond 1. Bond 0 is always the single "Start" state. @@ -1598,7 +1697,9 @@ def from_pauli_sum( # The FSM construction is optimal for one-sided (suffix) uniqueness. # A standard two-sweep compression ("lr_rl") puts the MPO in canonical form # and removes any remaining redundancies (e.g., common prefixes). - self.compress(tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl") + self.compress( + tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl" + ) assert self.check_if_valid_mpo(), "MPO initialized wrong" def compress( @@ -1647,9 +1748,13 @@ def compress( for _ in range(n_sweeps): for direction in schedule: - self._compress_one_sweep(direction=direction, tol=tol, max_bond_dim=max_bond_dim) + self._compress_one_sweep( + direction=direction, tol=tol, max_bond_dim=max_bond_dim + ) - def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | None) -> None: + def _compress_one_sweep( + self, *, direction: str, tol: float, max_bond_dim: int | None + ) -> None: """Run one in-place MPO SVD compression sweep in the given direction. Args: @@ -1699,7 +1804,9 @@ def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | vh = vh[:keep, :] # Left tensor: (bond_dim_left, d, d, keep) -> (d, d, bond_dim_left, keep) - left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose(1, 2, 0, 3) + left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose( + 1, 2, 0, 3 + ) # Right tensor: (keep, d, d, bond_dim_right) -> (d, d, keep, bond_dim_right) svh = (s[:, None] * vh).reshape(keep, phys_dim, phys_dim, bond_dim_right) @@ -1738,7 +1845,10 @@ def to_mps(self) -> MPS: MPS: An MPS object containing the reshaped tensors. 
""" converted_tensors: list[NDArray[np.complex128]] = [ - np.reshape(tensor, (tensor.shape[0] * tensor.shape[1], tensor.shape[2], tensor.shape[3])) + np.reshape( + tensor, + (tensor.shape[0] * tensor.shape[1], tensor.shape[2], tensor.shape[3]), + ) for tensor in self.tensors ] @@ -1760,12 +1870,122 @@ def to_matrix(self) -> NDArray[np.complex128]: for tensor in self.tensors[1:]: mat = oe.contract("abcd, efdg->aebfcg", mat, tensor) mat = np.reshape( - mat, (mat.shape[0] * mat.shape[1], mat.shape[2] * mat.shape[3], mat.shape[4], mat.shape[5]) + mat, + ( + mat.shape[0] * mat.shape[1], + mat.shape[2] * mat.shape[3], + mat.shape[4], + mat.shape[5], + ), ) # Final left and right bonds should be 1 return np.squeeze(mat, axis=(2, 3)) + @classmethod + def from_matrix( + cls, + mat: np.ndarray, + d_in: int, + d_out: int, + max_bond: Optional[int] = None, + cutoff: float = 0.0, + dtype=np.complex128, + ) -> "MPO": + """ + Factorize a dense matrix into an MPO with uniform local dimensions. + + Each site has shape (d_in, d_out). The number of sites n is inferred from + + mat.shape = (d_in**n, d_out**n) + + Parameters + ---------- + mat : np.ndarray + Matrix of shape (d_in**n, d_out**n). + d_in : int + Input physical dimension per site. + d_out : int + Output physical dimension per site. + max_bond : Optional[int] + Max bond dimension. + cutoff : float + Singular values <= cutoff are discarded. + dtype : np.dtype + Tensor dtype. + """ + + rows, cols = mat.shape + + # --- infer length --- + n_rows = np.log(rows) / np.log(d_in) + n_cols = np.log(cols) / np.log(d_out) + + n = int(round(n_rows)) + + if not (np.isclose(n_rows, n) and np.isclose(n_cols, n)): + raise ValueError( + "Matrix dimensions are not compatible with uniform " + f"(d_in={d_in}, d_out={d_out}) MPO." + ) + + M = np.asarray(mat, dtype=dtype, order="F") + + left_rank = 1 + rem = M.reshape(1, rows, cols) + + tensors: List[np.ndarray] = [] + + def _truncate(s: np.ndarray) -> int: + r = s.size + if cutoff > 0.0: + r = int(np.sum(s > cutoff)) + r = max(r, 1) + if max_bond is not None: + r = min(r, max_bond) + return r + + # identical SVD sweep as before + for k in range(n - 1): + rest_rows = d_in ** (n - k - 1) + rest_cols = d_out ** (n - k - 1) + + rem = rem.reshape(left_rank, d_in, rest_rows, d_out, rest_cols) + + rem_perm = np.transpose(rem, (1, 3, 0, 2, 4)) + X = rem_perm.reshape(d_in * d_out * left_rank, rest_rows * rest_cols) + + U, s, Vh = np.linalg.svd(X, full_matrices=False) + + r_keep = _truncate(s) + U = U[:, :r_keep] + s = s[:r_keep] + Vh = Vh[:r_keep, :] + + T_k = U.reshape(d_in, d_out, left_rank, r_keep).astype(dtype, copy=False) + tensors.append(T_k) + + rem = (s[:, None] * Vh).reshape(r_keep, rest_rows, rest_cols) + left_rank = r_keep + + rem = rem.reshape(left_rank, d_in, d_out) + + T_last = ( + np.transpose(rem, (1, 2, 0)) + .reshape(d_in, d_out, left_rank, 1) + .astype(dtype, copy=False) + ) + tensors.append(T_last) + + mpo = cls() + mpo.tensors = tensors + mpo.length = n + mpo.physical_dimension = (d_in, d_out) + + assert mpo.check_if_valid_mpo(), "MPO initialized wrong" + + return mpo + def check_if_valid_mpo(self) -> bool: """MPO validity check. 
diff --git a/tests/core/data_structures/test_networks.py b/tests/core/data_structures/test_networks.py index 5f7bb904..ffa7e8f0 100644 --- a/tests/core/data_structures/test_networks.py +++ b/tests/core/data_structures/test_networks.py @@ -380,6 +380,24 @@ def test_custom() -> None: assert original.shape == created.shape assert np.allclose(original, created) +def test_from_matrix() + """Test that from_matrix() constructs a correct MPO. + + This test constructs a dense Bose-Hubbard Hamiltonian and creates an MPO via from_matrix(). This + is converted back via to_matrix and is compared to the original. The same is done for a random + matrix at maximal bond dimension. + """ + length = 5 + # local dimenstion + d = 3 + H = _bose_hubbard_dense(length,d,.9,.6,.2) + # exact down to bond dimension 4 + Hmpo = MPO.from_matrix(H,d,d,4) + assert np.allclose(H, Hmpo.to_matrix()) + + H=np.random.rand(d**length,d**length) + Hmpo = MPO.from_matrix(H,d,d,1000000) + assert np.allclose(H, Hmpo.to_matrix()) def test_to_mps() -> None: """Test converting an MPO to an MPS. From dee52966366ff07e5d7cbb2b68cb9f484d5f7d37 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 13 Feb 2026 14:39:40 +0000 Subject: [PATCH 02/25] =?UTF-8?q?=F0=9F=8E=A8=20pre-commit=20fixes?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/mqt/yaqs/core/data_structures/networks.py | 235 +++++------------- tests/core/data_structures/test_networks.py | 6 +- 2 files changed, 63 insertions(+), 178 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 986b540d..29cfa8d0 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -174,9 +174,7 @@ def __init__( vector[0] = rng.random() vector[1] = 1 - vector[0] elif state == "basis": - assert ( - basis_string is not None - ), "basis_string must be provided for 'basis' state initialization." + assert basis_string is not None, "basis_string must be provided for 'basis' state initialization." self.init_mps_from_basis(basis_string, self.physical_dimensions) break else: @@ -193,9 +191,7 @@ def __init__( if pad is not None: self.pad_bond_dimension(pad) - def init_mps_from_basis( - self, basis_string: str, physical_dimensions: list[int] - ) -> None: + def init_mps_from_basis(self, basis_string: str, physical_dimensions: list[int]) -> None: """Initialize a list of MPS tensors representing a product state from a basis string. Args: @@ -354,12 +350,8 @@ def get_schmidt_spectrum(self, sites: list[int]) -> NDArray[np.float64]: NDArray[np.float64]: The Schmidt spectrum (length 500), with unused entries filled with NaN. """ - assert ( - len(sites) == 2 - ), "Schmidt spectrum is defined on a bond (two adjacent sites)." - assert ( - sites[0] + 1 == sites[1] - ), "Schmidt spectrum only defined for nearest-neighbor cut." + assert len(sites) == 2, "Schmidt spectrum is defined on a bond (two adjacent sites)." + assert sites[0] + 1 == sites[1], "Schmidt spectrum only defined for nearest-neighbor cut." 
top_schmidt_vals = 500 i, j = sites a, b = self.tensors[i], self.tensors[j] @@ -415,9 +407,7 @@ def almost_equal(self, other: MPS) -> bool: return False return True - def shift_orthogonality_center_right( - self, current_orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def shift_orthogonality_center_right(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: """Shifts orthogonality center right. This function performs a QR decomposition to shift the known current center to the right and move @@ -451,9 +441,7 @@ def shift_orthogonality_center_right( self.tensors[current_orthogonality_center + 1], ) = (a_new, b_new) - def shift_orthogonality_center_left( - self, current_orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def shift_orthogonality_center_left(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: """Shifts orthogonality center left. This function flips the network, performs a right shift, then flips the network again. @@ -464,14 +452,10 @@ def shift_orthogonality_center_left( Default is QR. """ self.flip_network() - self.shift_orthogonality_center_right( - self.length - current_orthogonality_center - 1, decomposition - ) + self.shift_orthogonality_center_right(self.length - current_orthogonality_center - 1, decomposition) self.flip_network() - def set_canonical_form( - self, orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR") -> None: """Sets canonical form of MPS. Left and right normalizes an MPS around a selected site. @@ -482,9 +466,7 @@ def set_canonical_form( decomposition: Type of decomposition. Default QR. """ - def sweep_decomposition( - orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def sweep_decomposition(orthogonality_center: int, decomposition: str = "QR") -> None: for site, _ in enumerate(self.tensors): if site == orthogonality_center: break @@ -515,17 +497,13 @@ def normalize(self, form: str = "B", decomposition: str = "QR") -> None: if form == "B": self.flip_network() - self.set_canonical_form( - orthogonality_center=self.length - 1, decomposition=decomposition - ) + self.set_canonical_form(orthogonality_center=self.length - 1, decomposition=decomposition) self.shift_orthogonality_center_right(self.length - 1, decomposition) if form == "B": self.flip_network() - def truncate( - self, threshold: float = 1e-12, max_bond_dim: int | None = None - ) -> None: + def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> None: """In-place MPS truncation via repeated two-site SVDs.""" orth_center = self.check_canonical_form()[0] if self.length == 1: @@ -547,9 +525,7 @@ def truncate( self.flip_network() - def scalar_product( - self, other: MPS, sites: int | list[int] | None = None - ) -> np.complex128: + def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np.complex128: """Compute the scalar (inner) product between two Matrix Product States (MPS). The function contracts the corresponding tensors of two MPS objects. 
If no specific site is @@ -576,12 +552,8 @@ def scalar_product( result = None for idx in range(self.length): # contract at each site into a 4-leg tensor - theta = oe.contract( - "abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx] - ) - result = ( - theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) - ) + theta = oe.contract("abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx]) + result = theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) # squeeze down to scalar assert result is not None return np.complex128(np.squeeze(result)) @@ -613,9 +585,7 @@ def scalar_product( msg = f"Invalid `sites` argument: {sites!r}" raise ValueError(msg) - def local_expect( - self, operator: Observable, sites: int | list[int] - ) -> np.complex128: + def local_expect(self, operator: Observable, sites: int | list[int]) -> np.complex128: """Compute the local expectation value of an operator on an MPS. The function applies the given operator to the tensor at the specified site of a deep copy of the @@ -641,17 +611,11 @@ def local_expect( i = sites if isinstance(operator.sites, list): - assert ( - operator.sites[0] == i - ), f"Operator sites mismatch {operator.sites[0]}, {i}" + assert operator.sites[0] == i, f"Operator sites mismatch {operator.sites[0]}, {i}" elif isinstance(operator.sites, int): - assert ( - operator.sites == i - ), f"Operator sites mismatch {operator.sites}, {i}" + assert operator.sites == i, f"Operator sites mismatch {operator.sites}, {i}" - assert ( - i is not None - ), f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" + assert i is not None, f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" a = temp_state.tensors[i] temp_state.tensors[i] = oe.contract("ab, bcd->acd", operator.gate.matrix, a) @@ -662,12 +626,10 @@ def local_expect( assert operator.sites[0] == i, "Observable sites mismatch" assert operator.sites[1] == j, "Observable sites mismatch" - assert ( - operator.sites[0] < operator.sites[1] - ), "Observable sites must be in ascending order." - assert ( - operator.sites[1] - operator.sites[0] == 1 - ), "Only nearest-neighbor observables are currently implemented." + assert operator.sites[0] < operator.sites[1], "Observable sites must be in ascending order." + assert operator.sites[1] - operator.sites[0] == 1, ( + "Only nearest-neighbor observables are currently implemented." 
+ ) a = temp_state.tensors[i] b = temp_state.tensors[j] d_i, left, _ = a.shape @@ -679,9 +641,7 @@ def local_expect( theta = theta.reshape(left, d_i * d_j, right) # (l, d_i*d_j, r) # 2) apply operator on the combined phys index - theta = oe.contract( - "ab, cbd->cad", operator.gate.matrix, theta - ) # (l, d_i*d_j, r) + theta = oe.contract("ab, cbd->cad", operator.gate.matrix, theta) # (l, d_i*d_j, r) theta = theta.reshape(left, d_i, d_j, right) # back to (l, d_i, d_j, r) # 3) split via SVD @@ -694,9 +654,7 @@ def local_expect( u_tensor = u_mat.reshape(left, d_i, chi_new) # (l, d_i, r_new) a_new = u_tensor.transpose(1, 0, 2) # → (d_i, l, r_new) - v_tensor = (np.diag(s_vec) @ v_mat).reshape( - chi_new, d_j, right - ) # (l_new, d_j, r) + v_tensor = (np.diag(s_vec) @ v_mat).reshape(chi_new, d_j, right) # (l_new, d_j, r) b_new = v_tensor.transpose(1, 0, 2) # → (d_j, l_new, r) temp_state.tensors[i] = a_new @@ -732,44 +690,24 @@ def evaluate_observables( elif observable.gate.name == "total_bond": results[obs_index, column_index] = self.get_total_bond() elif observable.gate.name in {"entropy", "schmidt_spectrum"}: - assert isinstance( - observable.sites, list - ), "Given metric requires a list of sites" - assert ( - len(observable.sites) == 2 - ), "Given metric requires 2 sites to act on." + assert isinstance(observable.sites, list), "Given metric requires a list of sites" + assert len(observable.sites) == 2, "Given metric requires 2 sites to act on." max_site = max(observable.sites) min_site = min(observable.sites) - assert ( - max_site - min_site == 1 - ), "Entropy and Schmidt cuts must be nearest neighbor." + assert max_site - min_site == 1, "Entropy and Schmidt cuts must be nearest neighbor." for s in observable.sites: - assert s in range( - self.length - ), f"Observable acting on non-existing site: {s}" + assert s in range(self.length), f"Observable acting on non-existing site: {s}" if observable.gate.name == "entropy": - results[obs_index, column_index] = self.get_entropy( - observable.sites - ) + results[obs_index, column_index] = self.get_entropy(observable.sites) elif observable.gate.name == "schmidt_spectrum": - results[obs_index, column_index] = self.get_schmidt_spectrum( - observable.sites - ) + results[obs_index, column_index] = self.get_schmidt_spectrum(observable.sites) elif observable.gate.name == "pvm": - assert hasattr( - observable.gate, "bitstring" - ), "Gate does not have attribute bitstring." - results[obs_index, column_index] = self.project_onto_bitstring( - observable.gate.bitstring - ) + assert hasattr(observable.gate, "bitstring"), "Gate does not have attribute bitstring." + results[obs_index, column_index] = self.project_onto_bitstring(observable.gate.bitstring) else: - idx = ( - observable.sites[0] - if isinstance(observable.sites, list) - else observable.sites - ) + idx = observable.sites[0] if isinstance(observable.sites, list) else observable.sites if idx > last_site: for site in range(last_site, idx): temp_state.shift_orthogonality_center_right(site) @@ -794,24 +732,16 @@ def expect(self, observable: Observable) -> np.float64: elif isinstance(observable.sites, list): sites_list = observable.sites - assert ( - sites_list is not None - ), f"Invalid type in expect {type(observable.sites).__name__}" + assert sites_list is not None, f"Invalid type in expect {type(observable.sites).__name__}" - assert ( - len(sites_list) < 3 - ), "Only one- and two-site observables are currently implemented." 
+ assert len(sites_list) < 3, "Only one- and two-site observables are currently implemented." for s in sites_list: - assert s in range( - self.length - ), f"Observable acting on non-existing site: {s}" + assert s in range(self.length), f"Observable acting on non-existing site: {s}" exp = self.local_expect(observable, sites_list) - assert ( - exp.imag < 1e-13 - ), f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." + assert exp.imag < 1e-13, f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." return exp.real def measure_single_shot(self) -> int: @@ -828,9 +758,7 @@ def measure_single_shot(self) -> int: temp_state = copy.deepcopy(self) bitstring = [] for site, tensor in enumerate(temp_state.tensors): - reduced_density_matrix = oe.contract( - "abc, dbc->ad", tensor, np.conj(tensor) - ) + reduced_density_matrix = oe.contract("abc, dbc->ad", tensor, np.conj(tensor)) probabilities = np.diag(reduced_density_matrix).real rng = np.random.default_rng() chosen_index = rng.choice(len(probabilities), p=probabilities) @@ -844,9 +772,7 @@ def measure_single_shot(self) -> int: temp_state.tensors[site + 1] = ( # noqa: B909 1 / np.sqrt(probabilities[chosen_index]) - * oe.contract( - "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] - ) + * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) ) return sum(c << i for i, c in enumerate(bitstring)) @@ -871,14 +797,10 @@ def measure_shots(self, shots: int) -> dict[int, int]: if shots > 1: max_workers = max(1, multiprocessing.cpu_count() - 1) with ( - concurrent.futures.ProcessPoolExecutor( - max_workers=max_workers - ) as executor, + concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor, tqdm(total=shots, desc="Measuring shots", ncols=80) as pbar, ): - futures = [ - executor.submit(self.measure_single_shot) for _ in range(shots) - ] + futures = [executor.submit(self.measure_single_shot) for _ in range(shots)] for future in concurrent.futures.as_completed(futures): result = future.result() results[result] = results.get(result, 0) + 1 @@ -902,9 +824,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: Returns: float: Probability of obtaining the given bitstring under projective measurement. 
""" - assert ( - len(bitstring) == self.length - ), "Bitstring length must match number of sites" + assert len(bitstring) == self.length, "Bitstring length must match number of sites" temp_state = copy.deepcopy(self) total_norm = 1.0 @@ -912,9 +832,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: state_index = int(char) tensor = temp_state.tensors[site] local_dim = self.physical_dimensions[site] - assert ( - 0 <= state_index < local_dim - ), f"Invalid state index {state_index} at site {site}" + assert 0 <= state_index < local_dim, f"Invalid state index {state_index} at site {site}" selected_state = np.zeros(local_dim) selected_state[state_index] = 1 @@ -931,11 +849,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: # Normalize and propagate if site != self.length - 1: temp_state.tensors[site + 1] = ( - 1 - / norm - * oe.contract( - "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] - ) + 1 / norm * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) ) return np.complex128(total_norm**2) @@ -1452,20 +1366,13 @@ def bose_hubbard( tensor[3, 3] = id_boson # build the full tensor list - tensors = [ - np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) - for _ in range(length) - ] + tensors = [np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) for _ in range(length)] # Left boundary: take only row 0 - tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype( - np.complex128 - ) + tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype(np.complex128) # Right boundary: take only col 3 - tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype( - np.complex128 - ) + tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype(np.complex128) mpo = cls() mpo.tensors = tensors @@ -1523,9 +1430,7 @@ def finite_state_machine( self.length = len(self.tensors) self.physical_dimension = self.tensors[0].shape[0] - def custom( - self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True - ) -> None: + def custom(self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True) -> None: """Custom MPO from tensors. Initialize the custom MPO (Matrix Product Operator) with the given tensors. @@ -1593,9 +1498,7 @@ def from_pauli_sum( self.physical_dimension = physical_dimension if not terms: - self.tensors = [ - np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length) - ] + self.tensors = [np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length)] return # 1. Parse terms into dense lists of operator names. @@ -1629,9 +1532,7 @@ def from_pauli_sum( term_trajectories[t_idx][length] = 0 # bond_state_maps[i] stores the mapping: (Op_str, Next_State_ID) -> Current_State_ID - bond_state_maps: list[dict[tuple[str, int], int]] = [ - {} for _ in range(length + 1) - ] + bond_state_maps: list[dict[tuple[str, int], int]] = [{} for _ in range(length + 1)] # Sweep Right-to-Left (sites L-1 down to 1) to build the FSM transitions. # We stop at bond 1. Bond 0 is always the single "Start" state. @@ -1697,9 +1598,7 @@ def from_pauli_sum( # The FSM construction is optimal for one-sided (suffix) uniqueness. # A standard two-sweep compression ("lr_rl") puts the MPO in canonical form # and removes any remaining redundancies (e.g., common prefixes). 
- self.compress( - tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl" - ) + self.compress(tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl") assert self.check_if_valid_mpo(), "MPO initialized wrong" def compress( @@ -1748,13 +1647,9 @@ def compress( for _ in range(n_sweeps): for direction in schedule: - self._compress_one_sweep( - direction=direction, tol=tol, max_bond_dim=max_bond_dim - ) + self._compress_one_sweep(direction=direction, tol=tol, max_bond_dim=max_bond_dim) - def _compress_one_sweep( - self, *, direction: str, tol: float, max_bond_dim: int | None - ) -> None: + def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | None) -> None: """Run one in-place MPO SVD compression sweep in the given direction. Args: @@ -1804,9 +1699,7 @@ def _compress_one_sweep( vh = vh[:keep, :] # Left tensor: (bond_dim_left, d, d, keep) -> (d, d, bond_dim_left, keep) - left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose( - 1, 2, 0, 3 - ) + left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose(1, 2, 0, 3) # Right tensor: (keep, d, d, bond_dim_right) -> (d, d, keep, bond_dim_right) svh = (s[:, None] * vh).reshape(keep, phys_dim, phys_dim, bond_dim_right) @@ -1891,9 +1784,8 @@ def from_matrix( max_bond: Optional[int] = None, cutoff: float = 0.0, dtype=np.complex128, - ) -> "MPO": - """ - Factorize a dense matrix into an MPO with uniform local dimensions. + ) -> MPO: + """Factorize a dense matrix into an MPO with uniform local dimensions. Each site has shape (d_in, d_out). The number of sites n is inferred from @@ -1914,20 +1806,17 @@ def from_matrix( dtype : np.dtype Tensor dtype. """ - rows, cols = mat.shape # --- infer length --- n_rows = np.log(rows) / np.log(d_in) n_cols = np.log(cols) / np.log(d_out) - n = int(round(n_rows)) + n = round(n_rows) if not (np.isclose(n_rows, n) and np.isclose(n_cols, n)): - raise ValueError( - "Matrix dimensions are not compatible with uniform " - f"(d_in={d_in}, d_out={d_out}) MPO." - ) + msg = f"Matrix dimensions are not compatible with uniform (d_in={d_in}, d_out={d_out}) MPO." + raise ValueError(msg) M = np.asarray(mat, dtype=dtype, order="F") @@ -1970,11 +1859,7 @@ def _truncate(s: np.ndarray) -> int: rem = rem.reshape(left_rank, d_in, d_out) - T_last = ( - np.transpose(rem, (1, 2, 0)) - .reshape(d_in, d_out, left_rank, 1) - .astype(dtype, copy=False) - ) + T_last = np.transpose(rem, (1, 2, 0)).reshape(d_in, d_out, left_rank, 1).astype(dtype, copy=False) tensors.append(T_last) mpo = cls() diff --git a/tests/core/data_structures/test_networks.py b/tests/core/data_structures/test_networks.py index ffa7e8f0..7c7a018c 100644 --- a/tests/core/data_structures/test_networks.py +++ b/tests/core/data_structures/test_networks.py @@ -384,11 +384,11 @@ def test_from_matrix() """Test that from_matrix() constructs a correct MPO. This test constructs a dense Bose-Hubbard Hamiltonian and creates an MPO via from_matrix(). This - is converted back via to_matrix and is compared to the original. The same is done for a random + is converted back via to_matrix and is compared to the original. The same is done for a random matrix at maximal bond dimension. 
- """ + """ length = 5 - # local dimenstion + # local dimension d = 3 H = _bose_hubbard_dense(length,d,.9,.6,.2) # exact down to bond dimension 4 From 1105ef989c68196e5fe76a0cf964ba2e9a914506 Mon Sep 17 00:00:00 2001 From: Lukas Date: Fri, 13 Feb 2026 15:41:33 +0100 Subject: [PATCH 03/25] bugfix in test_from_matrix --- tests/core/data_structures/test_networks.py | 140 +++++++++++++++----- 1 file changed, 107 insertions(+), 33 deletions(-) diff --git a/tests/core/data_structures/test_networks.py b/tests/core/data_structures/test_networks.py index ffa7e8f0..2f4de419 100644 --- a/tests/core/data_structures/test_networks.py +++ b/tests/core/data_structures/test_networks.py @@ -64,7 +64,9 @@ def _embed_one_body(op: np.ndarray, length: int, i: int) -> np.ndarray: return out -def _embed_two_body(op1: np.ndarray, op2: np.ndarray, length: int, i: int) -> np.ndarray: +def _embed_two_body( + op1: np.ndarray, op2: np.ndarray, length: int, i: int +) -> np.ndarray: """Embed a nearest-neighbor two-site operator into a length-L qubit Hilbert space. Args: @@ -113,7 +115,9 @@ def _ising_dense(length: int, j_val: float, g: float) -> np.ndarray: return H -def _heisenberg_dense(length: int, jx: float, jy: float, jz: float, h: float) -> np.ndarray: +def _heisenberg_dense( + length: int, jx: float, jy: float, jz: float, h: float +) -> np.ndarray: """Construct the dense Heisenberg Hamiltonian for an open chain. The Hamiltonian is @@ -142,7 +146,9 @@ def _heisenberg_dense(length: int, jx: float, jy: float, jz: float, h: float) -> return H -def _bose_hubbard_dense(length: int, local_dim: int, omega: float, hopping_j: float, hubbard_u: float) -> np.ndarray: +def _bose_hubbard_dense( + length: int, local_dim: int, omega: float, hopping_j: float, hubbard_u: float +) -> np.ndarray: """Construct the exact dense Bose-Hubbard Hamiltonian for comparison. Returns: @@ -204,7 +210,9 @@ def untranspose_block(mpo_tensor: NDArray[np.complex128]) -> NDArray[np.complex1 def crandn( - size: int | tuple[int, ...], *args: int, seed: np.random.Generator | int | None = None + size: int | tuple[int, ...], + *args: int, + seed: np.random.Generator | int | None = None, ) -> NDArray[np.complex128]: """Draw random samples from the standard complex normal distribution. @@ -222,7 +230,10 @@ def crandn( size = (size,) rng = np.random.default_rng(seed) # 1 / sqrt(2) is a normalization factor - return np.asarray((rng.standard_normal(size) + 1j * rng.standard_normal(size)) / np.sqrt(2), dtype=np.complex128) + return np.asarray( + (rng.standard_normal(size) + 1j * rng.standard_normal(size)) / np.sqrt(2), + dtype=np.complex128, + ) def random_mps(shapes: list[tuple[int, int, int]], *, normalize: bool = True) -> MPS: @@ -295,7 +306,9 @@ def test_bose_hubbard_correct_operator() -> None: assert mpo.length == length assert mpo.physical_dimension == local_dim assert len(mpo.tensors) == length - assert all(t.shape[2] <= 4 and t.shape[3] <= 4 for t in mpo.tensors), "Bond dimension should be 4" + assert all( + t.shape[2] <= 4 and t.shape[3] <= 4 for t in mpo.tensors + ), "Bond dimension should be 4" # Dense comparison H_dense = _bose_hubbard_dense(length, local_dim, omega, J, U) @@ -380,25 +393,27 @@ def test_custom() -> None: assert original.shape == created.shape assert np.allclose(original, created) -def test_from_matrix() + +def test_from_matrix() -> None: """Test that from_matrix() constructs a correct MPO. This test constructs a dense Bose-Hubbard Hamiltonian and creates an MPO via from_matrix(). 
This - is converted back via to_matrix and is compared to the original. The same is done for a random + is converted back via to_matrix and is compared to the original. The same is done for a random matrix at maximal bond dimension. - """ + """ length = 5 # local dimenstion d = 3 - H = _bose_hubbard_dense(length,d,.9,.6,.2) + H = _bose_hubbard_dense(length, d, 0.9, 0.6, 0.2) # exact down to bond dimension 4 - Hmpo = MPO.from_matrix(H,d,d,4) + Hmpo = MPO.from_matrix(H, d, d, 4) assert np.allclose(H, Hmpo.to_matrix()) - H=np.random.rand(d**length,d**length) - Hmpo = MPO.from_matrix(H,d,d,1000000) + H = np.random.rand(d**length, d**length) + Hmpo = MPO.from_matrix(H, d, d, 1000000) assert np.allclose(H, Hmpo.to_matrix()) + def test_to_mps() -> None: """Test converting an MPO to an MPS. @@ -449,7 +464,12 @@ def test_rotate() -> None: mpo.rotate(conjugate=False) for orig, rotated in zip(original_tensors, mpo.tensors, strict=False): - assert rotated.shape == (orig.shape[1], orig.shape[0], orig.shape[2], orig.shape[3]) + assert rotated.shape == ( + orig.shape[1], + orig.shape[0], + orig.shape[2], + orig.shape[3], + ) np.testing.assert_allclose(rotated, np.transpose(orig, (1, 0, 2, 3))) mpo.rotate(conjugate=True) @@ -477,7 +497,9 @@ def test_check_if_identity() -> None: ############################################################################## -@pytest.mark.parametrize("state", ["zeros", "ones", "x+", "x-", "y+", "y-", "Neel", "wall", "basis"]) +@pytest.mark.parametrize( + "state", ["zeros", "ones", "x+", "x-", "y+", "y-", "Neel", "wall", "basis"] +) def test_mps_initialization(state: str) -> None: """Test that MPS initializes with the correct chain length, physical dimensions, and tensor shapes. @@ -492,7 +514,12 @@ def test_mps_initialization(state: str) -> None: basis_string = "1001" if state == "basis": - mps = MPS(length=length, physical_dimensions=[pdim] * length, state=state, basis_string=basis_string) + mps = MPS( + length=length, + physical_dimensions=[pdim] * length, + state=state, + basis_string=basis_string, + ) else: mps = MPS(length=length, physical_dimensions=[pdim] * length, state=state) @@ -526,10 +553,18 @@ def test_mps_initialization(state: str) -> None: expected = np.array([1, -1j], dtype=complex) / np.sqrt(2) np.testing.assert_allclose(vec, expected) elif state == "Neel": - expected = np.array([1, 0], dtype=complex) if i % 2 else np.array([0, 1], dtype=complex) + expected = ( + np.array([1, 0], dtype=complex) + if i % 2 + else np.array([0, 1], dtype=complex) + ) np.testing.assert_allclose(vec, expected) elif state == "wall": - expected = np.array([1, 0], dtype=complex) if i < length // 2 else np.array([0, 1], dtype=complex) + expected = ( + np.array([1, 0], dtype=complex) + if i < length // 2 + else np.array([0, 1], dtype=complex) + ) np.testing.assert_allclose(vec, expected) elif state == "basis": bit = int(basis_string[i]) @@ -570,12 +605,20 @@ def test_flip_network() -> None: t2 = rng.random(size=(pdim, 2, 2)).astype(np.complex128) t3 = rng.random(size=(pdim, 2, 1)).astype(np.complex128) original_tensors = [t1, t2, t3] - mps = MPS(length, tensors=copy.deepcopy(original_tensors), physical_dimensions=[pdim] * length) + mps = MPS( + length, + tensors=copy.deepcopy(original_tensors), + physical_dimensions=[pdim] * length, + ) mps.flip_network() flipped_tensors = mps.tensors assert len(flipped_tensors) == length - assert flipped_tensors[0].shape == (pdim, original_tensors[2].shape[2], original_tensors[2].shape[1]) + assert flipped_tensors[0].shape == ( + pdim, + 
original_tensors[2].shape[2], + original_tensors[2].shape[1], + ) mps.flip_network() for orig, now in zip(original_tensors, mps.tensors, strict=False): assert np.allclose(orig, now) @@ -888,7 +931,9 @@ def test_convert_to_vector_fidelity() -> None: # Define the simulation parameters sim_params = StrongSimParams( - observables=[Observable(Z(), site) for site in range(num_qubits)], get_state=True, show_progress=False + observables=[Observable(Z(), site) for site in range(num_qubits)], + get_state=True, + show_progress=False, ) simulator.run(state, circ, sim_params) assert sim_params.output_state is not None @@ -912,7 +957,9 @@ def test_convert_to_vector_fidelity_long_range() -> None: # Define the simulation parameters sim_params = StrongSimParams( - observables=[Observable(Z(), site) for site in range(num_qubits)], get_state=True, show_progress=False + observables=[Observable(Z(), site) for site in range(num_qubits)], + get_state=True, + show_progress=False, ) simulator.run(state, circ, sim_params) assert sim_params.output_state is not None @@ -969,7 +1016,9 @@ def test_pad_raises_on_shrink() -> None: mps = MPS(length=5, state="zeros") mps.pad_bond_dimension(4) # enlarge first - with pytest.raises(ValueError, match="Target bond dim must be at least current bond dim"): + with pytest.raises( + ValueError, match="Target bond dim must be at least current bond dim" + ): mps.pad_bond_dimension(2) # would shrink - must fail @@ -1195,7 +1244,9 @@ def test_evaluate_observables_diagnostics_and_meta_then_pvm_separately() -> None Observable(GateLibrary.entropy(), [1, 2]), Observable(GateLibrary.schmidt_spectrum(), [1, 2]), ] - sim_diag = AnalogSimParams(diagnostics_and_meta, elapsed_time=0.1, dt=0.1, show_progress=False) + sim_diag = AnalogSimParams( + diagnostics_and_meta, elapsed_time=0.1, dt=0.1, show_progress=False + ) results_diag = np.empty((len(diagnostics_and_meta), 2), dtype=object) mps.evaluate_observables(sim_diag, results_diag, column_index=0) @@ -1261,7 +1312,10 @@ def test_evaluate_observables_meta_validation_errors() -> None: # Wrong length (entropy expects exactly two adjacent indices) sim_bad_len = AnalogSimParams( - [Observable(GateLibrary.entropy(), [1])], elapsed_time=0.1, dt=0.1, show_progress=False + [Observable(GateLibrary.entropy(), [1])], + elapsed_time=0.1, + dt=0.1, + show_progress=False, ) results_len = np.empty((1, 1), dtype=np.float64) with pytest.raises(AssertionError): @@ -1269,7 +1323,10 @@ def test_evaluate_observables_meta_validation_errors() -> None: # Non-adjacent Schmidt cut sim_non_adj = AnalogSimParams( - [Observable(GateLibrary.schmidt_spectrum(), [0, 2])], elapsed_time=0.1, dt=0.1, show_progress=False + [Observable(GateLibrary.schmidt_spectrum(), [0, 2])], + elapsed_time=0.1, + dt=0.1, + show_progress=False, ) results_adj = np.empty((1, 1), dtype=object) with pytest.raises(AssertionError): @@ -1340,7 +1397,9 @@ def test_from_pauli_sum_raises_on_nonpositive_length() -> None: mpo.from_pauli_sum(terms=[(1.0, "Z0")], length=-5) -def test_from_pauli_sum_raises_on_site_index_out_of_bounds(monkeypatch: pytest.MonkeyPatch) -> None: +def test_from_pauli_sum_raises_on_site_index_out_of_bounds( + monkeypatch: pytest.MonkeyPatch, +) -> None: """Pauli-sum MPO validation: parsed site indices outside [0, L-1] must raise.""" mpo = MPO() @@ -1351,7 +1410,9 @@ def test_from_pauli_sum_raises_on_site_index_out_of_bounds(monkeypatch: pytest.M mpo.from_pauli_sum(terms=[(1.0, "Z0")], length=4) -def test_from_pauli_sum_raises_on_invalid_local_op_label(monkeypatch: pytest.MonkeyPatch) 
-> None: +def test_from_pauli_sum_raises_on_invalid_local_op_label( + monkeypatch: pytest.MonkeyPatch, +) -> None: """Pauli-sum MPO validation: parsed local operator labels must be in _VALID.""" mpo = MPO() @@ -1385,14 +1446,22 @@ def test_compress_raises_on_invalid_directions() -> None: """MPO compress input validation: invalid sweep schedule strings must raise.""" mpo = MPO() mpo.tensors = [np.zeros((2, 2, 1, 1), dtype=complex)] - with pytest.raises(ValueError, match=r"directions must be one of \{'lr', 'rl', 'lr_rl', 'rl_lr'\}\."): + with pytest.raises( + ValueError, + match=r"directions must be one of \{'lr', 'rl', 'lr_rl', 'rl_lr'\}\.", + ): mpo.compress(directions="lr,rl") -def test_compress_n_sweeps_zero_returns_without_calling_sweeps(monkeypatch: pytest.MonkeyPatch) -> None: +def test_compress_n_sweeps_zero_returns_without_calling_sweeps( + monkeypatch: pytest.MonkeyPatch, +) -> None: """MPO compress control flow: n_sweeps=0 must return without invoking sweeps.""" mpo = MPO() - mpo.tensors = [np.zeros((2, 2, 1, 1), dtype=complex), np.zeros((2, 2, 1, 1), dtype=complex)] + mpo.tensors = [ + np.zeros((2, 2, 1, 1), dtype=complex), + np.zeros((2, 2, 1, 1), dtype=complex), + ] called = False @@ -1411,9 +1480,14 @@ def boom(**_kwargs: object) -> None: def test_compress_one_sweep_raises_on_invalid_direction() -> None: """MPO _compress_one_sweep input validation: direction must be 'lr' or 'rl'.""" mpo = MPO() - mpo.tensors = [np.zeros((2, 2, 1, 1), dtype=complex), np.zeros((2, 2, 1, 1), dtype=complex)] + mpo.tensors = [ + np.zeros((2, 2, 1, 1), dtype=complex), + np.zeros((2, 2, 1, 1), dtype=complex), + ] with pytest.raises(ValueError, match=r"direction must be 'lr' or 'rl'\."): - mpo._compress_one_sweep(direction="xx", tol=1e-12, max_bond_dim=None) # noqa: SLF001 + mpo._compress_one_sweep( + direction="xx", tol=1e-12, max_bond_dim=None + ) # noqa: SLF001 def test_from_pauli_sum_empty_spec_is_identity_term() -> None: From d992e93b719fd2c153322a074e4e3f2114e467ec Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 13 Feb 2026 14:43:08 +0000 Subject: [PATCH 04/25] =?UTF-8?q?=F0=9F=8E=A8=20pre-commit=20fixes?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/core/data_structures/test_networks.py | 44 +++++---------------- 1 file changed, 10 insertions(+), 34 deletions(-) diff --git a/tests/core/data_structures/test_networks.py b/tests/core/data_structures/test_networks.py index 0ea7f987..31f25a4b 100644 --- a/tests/core/data_structures/test_networks.py +++ b/tests/core/data_structures/test_networks.py @@ -64,9 +64,7 @@ def _embed_one_body(op: np.ndarray, length: int, i: int) -> np.ndarray: return out -def _embed_two_body( - op1: np.ndarray, op2: np.ndarray, length: int, i: int -) -> np.ndarray: +def _embed_two_body(op1: np.ndarray, op2: np.ndarray, length: int, i: int) -> np.ndarray: """Embed a nearest-neighbor two-site operator into a length-L qubit Hilbert space. Args: @@ -115,9 +113,7 @@ def _ising_dense(length: int, j_val: float, g: float) -> np.ndarray: return H -def _heisenberg_dense( - length: int, jx: float, jy: float, jz: float, h: float -) -> np.ndarray: +def _heisenberg_dense(length: int, jx: float, jy: float, jz: float, h: float) -> np.ndarray: """Construct the dense Heisenberg Hamiltonian for an open chain. 
The Hamiltonian is @@ -146,9 +142,7 @@ def _heisenberg_dense( return H -def _bose_hubbard_dense( - length: int, local_dim: int, omega: float, hopping_j: float, hubbard_u: float -) -> np.ndarray: +def _bose_hubbard_dense(length: int, local_dim: int, omega: float, hopping_j: float, hubbard_u: float) -> np.ndarray: """Construct the exact dense Bose-Hubbard Hamiltonian for comparison. Returns: @@ -306,9 +300,7 @@ def test_bose_hubbard_correct_operator() -> None: assert mpo.length == length assert mpo.physical_dimension == local_dim assert len(mpo.tensors) == length - assert all( - t.shape[2] <= 4 and t.shape[3] <= 4 for t in mpo.tensors - ), "Bond dimension should be 4" + assert all(t.shape[2] <= 4 and t.shape[3] <= 4 for t in mpo.tensors), "Bond dimension should be 4" # Dense comparison H_dense = _bose_hubbard_dense(length, local_dim, omega, J, U) @@ -497,9 +489,7 @@ def test_check_if_identity() -> None: ############################################################################## -@pytest.mark.parametrize( - "state", ["zeros", "ones", "x+", "x-", "y+", "y-", "Neel", "wall", "basis"] -) +@pytest.mark.parametrize("state", ["zeros", "ones", "x+", "x-", "y+", "y-", "Neel", "wall", "basis"]) def test_mps_initialization(state: str) -> None: """Test that MPS initializes with the correct chain length, physical dimensions, and tensor shapes. @@ -553,18 +543,10 @@ def test_mps_initialization(state: str) -> None: expected = np.array([1, -1j], dtype=complex) / np.sqrt(2) np.testing.assert_allclose(vec, expected) elif state == "Neel": - expected = ( - np.array([1, 0], dtype=complex) - if i % 2 - else np.array([0, 1], dtype=complex) - ) + expected = np.array([1, 0], dtype=complex) if i % 2 else np.array([0, 1], dtype=complex) np.testing.assert_allclose(vec, expected) elif state == "wall": - expected = ( - np.array([1, 0], dtype=complex) - if i < length // 2 - else np.array([0, 1], dtype=complex) - ) + expected = np.array([1, 0], dtype=complex) if i < length // 2 else np.array([0, 1], dtype=complex) np.testing.assert_allclose(vec, expected) elif state == "basis": bit = int(basis_string[i]) @@ -1016,9 +998,7 @@ def test_pad_raises_on_shrink() -> None: mps = MPS(length=5, state="zeros") mps.pad_bond_dimension(4) # enlarge first - with pytest.raises( - ValueError, match="Target bond dim must be at least current bond dim" - ): + with pytest.raises(ValueError, match="Target bond dim must be at least current bond dim"): mps.pad_bond_dimension(2) # would shrink - must fail @@ -1244,9 +1224,7 @@ def test_evaluate_observables_diagnostics_and_meta_then_pvm_separately() -> None Observable(GateLibrary.entropy(), [1, 2]), Observable(GateLibrary.schmidt_spectrum(), [1, 2]), ] - sim_diag = AnalogSimParams( - diagnostics_and_meta, elapsed_time=0.1, dt=0.1, show_progress=False - ) + sim_diag = AnalogSimParams(diagnostics_and_meta, elapsed_time=0.1, dt=0.1, show_progress=False) results_diag = np.empty((len(diagnostics_and_meta), 2), dtype=object) mps.evaluate_observables(sim_diag, results_diag, column_index=0) @@ -1485,9 +1463,7 @@ def test_compress_one_sweep_raises_on_invalid_direction() -> None: np.zeros((2, 2, 1, 1), dtype=complex), ] with pytest.raises(ValueError, match=r"direction must be 'lr' or 'rl'\."): - mpo._compress_one_sweep( - direction="xx", tol=1e-12, max_bond_dim=None - ) # noqa: SLF001 + mpo._compress_one_sweep(direction="xx", tol=1e-12, max_bond_dim=None) # noqa: SLF001 def test_from_pauli_sum_empty_spec_is_identity_term() -> None: From 5d27a95aa8b92c6d8fa506f99ee4735e27c107e9 Mon Sep 17 00:00:00 2001 
From: Lukas Date: Fri, 13 Feb 2026 16:07:18 +0100 Subject: [PATCH 05/25] reduced from_matrix to the case d_in==d_out and added more tests. --- src/mqt/yaqs/core/data_structures/networks.py | 281 ++++++++++++------ tests/core/data_structures/test_networks.py | 17 +- 2 files changed, 205 insertions(+), 93 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 29cfa8d0..47c1d254 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -174,7 +174,9 @@ def __init__( vector[0] = rng.random() vector[1] = 1 - vector[0] elif state == "basis": - assert basis_string is not None, "basis_string must be provided for 'basis' state initialization." + assert ( + basis_string is not None + ), "basis_string must be provided for 'basis' state initialization." self.init_mps_from_basis(basis_string, self.physical_dimensions) break else: @@ -191,7 +193,9 @@ def __init__( if pad is not None: self.pad_bond_dimension(pad) - def init_mps_from_basis(self, basis_string: str, physical_dimensions: list[int]) -> None: + def init_mps_from_basis( + self, basis_string: str, physical_dimensions: list[int] + ) -> None: """Initialize a list of MPS tensors representing a product state from a basis string. Args: @@ -350,8 +354,12 @@ def get_schmidt_spectrum(self, sites: list[int]) -> NDArray[np.float64]: NDArray[np.float64]: The Schmidt spectrum (length 500), with unused entries filled with NaN. """ - assert len(sites) == 2, "Schmidt spectrum is defined on a bond (two adjacent sites)." - assert sites[0] + 1 == sites[1], "Schmidt spectrum only defined for nearest-neighbor cut." + assert ( + len(sites) == 2 + ), "Schmidt spectrum is defined on a bond (two adjacent sites)." + assert ( + sites[0] + 1 == sites[1] + ), "Schmidt spectrum only defined for nearest-neighbor cut." top_schmidt_vals = 500 i, j = sites a, b = self.tensors[i], self.tensors[j] @@ -407,7 +415,9 @@ def almost_equal(self, other: MPS) -> bool: return False return True - def shift_orthogonality_center_right(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: + def shift_orthogonality_center_right( + self, current_orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Shifts orthogonality center right. This function performs a QR decomposition to shift the known current center to the right and move @@ -441,7 +451,9 @@ def shift_orthogonality_center_right(self, current_orthogonality_center: int, de self.tensors[current_orthogonality_center + 1], ) = (a_new, b_new) - def shift_orthogonality_center_left(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: + def shift_orthogonality_center_left( + self, current_orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Shifts orthogonality center left. This function flips the network, performs a right shift, then flips the network again. @@ -452,10 +464,14 @@ def shift_orthogonality_center_left(self, current_orthogonality_center: int, dec Default is QR. 
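        Example (minimal usage sketch, not taken from the package documentation; the
        import path is assumed from the repository layout under src/mqt/yaqs):

            from mqt.yaqs.core.data_structures.networks import MPS

            mps = MPS(length=4, state="zeros")
            mps.set_canonical_form(orthogonality_center=2)  # canonicalize around site 2
            mps.shift_orthogonality_center_left(2)          # center moves from site 2 to site 1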
""" self.flip_network() - self.shift_orthogonality_center_right(self.length - current_orthogonality_center - 1, decomposition) + self.shift_orthogonality_center_right( + self.length - current_orthogonality_center - 1, decomposition + ) self.flip_network() - def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR") -> None: + def set_canonical_form( + self, orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Sets canonical form of MPS. Left and right normalizes an MPS around a selected site. @@ -466,7 +482,9 @@ def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR decomposition: Type of decomposition. Default QR. """ - def sweep_decomposition(orthogonality_center: int, decomposition: str = "QR") -> None: + def sweep_decomposition( + orthogonality_center: int, decomposition: str = "QR" + ) -> None: for site, _ in enumerate(self.tensors): if site == orthogonality_center: break @@ -497,13 +515,17 @@ def normalize(self, form: str = "B", decomposition: str = "QR") -> None: if form == "B": self.flip_network() - self.set_canonical_form(orthogonality_center=self.length - 1, decomposition=decomposition) + self.set_canonical_form( + orthogonality_center=self.length - 1, decomposition=decomposition + ) self.shift_orthogonality_center_right(self.length - 1, decomposition) if form == "B": self.flip_network() - def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> None: + def truncate( + self, threshold: float = 1e-12, max_bond_dim: int | None = None + ) -> None: """In-place MPS truncation via repeated two-site SVDs.""" orth_center = self.check_canonical_form()[0] if self.length == 1: @@ -525,7 +547,9 @@ def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> self.flip_network() - def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np.complex128: + def scalar_product( + self, other: MPS, sites: int | list[int] | None = None + ) -> np.complex128: """Compute the scalar (inner) product between two Matrix Product States (MPS). The function contracts the corresponding tensors of two MPS objects. If no specific site is @@ -552,8 +576,12 @@ def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np result = None for idx in range(self.length): # contract at each site into a 4-leg tensor - theta = oe.contract("abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx]) - result = theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) + theta = oe.contract( + "abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx] + ) + result = ( + theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) + ) # squeeze down to scalar assert result is not None return np.complex128(np.squeeze(result)) @@ -585,7 +613,9 @@ def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np msg = f"Invalid `sites` argument: {sites!r}" raise ValueError(msg) - def local_expect(self, operator: Observable, sites: int | list[int]) -> np.complex128: + def local_expect( + self, operator: Observable, sites: int | list[int] + ) -> np.complex128: """Compute the local expectation value of an operator on an MPS. 
The function applies the given operator to the tensor at the specified site of a deep copy of the @@ -611,11 +641,17 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl i = sites if isinstance(operator.sites, list): - assert operator.sites[0] == i, f"Operator sites mismatch {operator.sites[0]}, {i}" + assert ( + operator.sites[0] == i + ), f"Operator sites mismatch {operator.sites[0]}, {i}" elif isinstance(operator.sites, int): - assert operator.sites == i, f"Operator sites mismatch {operator.sites}, {i}" + assert ( + operator.sites == i + ), f"Operator sites mismatch {operator.sites}, {i}" - assert i is not None, f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" + assert ( + i is not None + ), f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" a = temp_state.tensors[i] temp_state.tensors[i] = oe.contract("ab, bcd->acd", operator.gate.matrix, a) @@ -626,10 +662,12 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl assert operator.sites[0] == i, "Observable sites mismatch" assert operator.sites[1] == j, "Observable sites mismatch" - assert operator.sites[0] < operator.sites[1], "Observable sites must be in ascending order." - assert operator.sites[1] - operator.sites[0] == 1, ( - "Only nearest-neighbor observables are currently implemented." - ) + assert ( + operator.sites[0] < operator.sites[1] + ), "Observable sites must be in ascending order." + assert ( + operator.sites[1] - operator.sites[0] == 1 + ), "Only nearest-neighbor observables are currently implemented." a = temp_state.tensors[i] b = temp_state.tensors[j] d_i, left, _ = a.shape @@ -641,7 +679,9 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl theta = theta.reshape(left, d_i * d_j, right) # (l, d_i*d_j, r) # 2) apply operator on the combined phys index - theta = oe.contract("ab, cbd->cad", operator.gate.matrix, theta) # (l, d_i*d_j, r) + theta = oe.contract( + "ab, cbd->cad", operator.gate.matrix, theta + ) # (l, d_i*d_j, r) theta = theta.reshape(left, d_i, d_j, right) # back to (l, d_i, d_j, r) # 3) split via SVD @@ -654,7 +694,9 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl u_tensor = u_mat.reshape(left, d_i, chi_new) # (l, d_i, r_new) a_new = u_tensor.transpose(1, 0, 2) # → (d_i, l, r_new) - v_tensor = (np.diag(s_vec) @ v_mat).reshape(chi_new, d_j, right) # (l_new, d_j, r) + v_tensor = (np.diag(s_vec) @ v_mat).reshape( + chi_new, d_j, right + ) # (l_new, d_j, r) b_new = v_tensor.transpose(1, 0, 2) # → (d_j, l_new, r) temp_state.tensors[i] = a_new @@ -690,24 +732,44 @@ def evaluate_observables( elif observable.gate.name == "total_bond": results[obs_index, column_index] = self.get_total_bond() elif observable.gate.name in {"entropy", "schmidt_spectrum"}: - assert isinstance(observable.sites, list), "Given metric requires a list of sites" - assert len(observable.sites) == 2, "Given metric requires 2 sites to act on." + assert isinstance( + observable.sites, list + ), "Given metric requires a list of sites" + assert ( + len(observable.sites) == 2 + ), "Given metric requires 2 sites to act on." max_site = max(observable.sites) min_site = min(observable.sites) - assert max_site - min_site == 1, "Entropy and Schmidt cuts must be nearest neighbor." + assert ( + max_site - min_site == 1 + ), "Entropy and Schmidt cuts must be nearest neighbor." 
for s in observable.sites: - assert s in range(self.length), f"Observable acting on non-existing site: {s}" + assert s in range( + self.length + ), f"Observable acting on non-existing site: {s}" if observable.gate.name == "entropy": - results[obs_index, column_index] = self.get_entropy(observable.sites) + results[obs_index, column_index] = self.get_entropy( + observable.sites + ) elif observable.gate.name == "schmidt_spectrum": - results[obs_index, column_index] = self.get_schmidt_spectrum(observable.sites) + results[obs_index, column_index] = self.get_schmidt_spectrum( + observable.sites + ) elif observable.gate.name == "pvm": - assert hasattr(observable.gate, "bitstring"), "Gate does not have attribute bitstring." - results[obs_index, column_index] = self.project_onto_bitstring(observable.gate.bitstring) + assert hasattr( + observable.gate, "bitstring" + ), "Gate does not have attribute bitstring." + results[obs_index, column_index] = self.project_onto_bitstring( + observable.gate.bitstring + ) else: - idx = observable.sites[0] if isinstance(observable.sites, list) else observable.sites + idx = ( + observable.sites[0] + if isinstance(observable.sites, list) + else observable.sites + ) if idx > last_site: for site in range(last_site, idx): temp_state.shift_orthogonality_center_right(site) @@ -732,16 +794,24 @@ def expect(self, observable: Observable) -> np.float64: elif isinstance(observable.sites, list): sites_list = observable.sites - assert sites_list is not None, f"Invalid type in expect {type(observable.sites).__name__}" + assert ( + sites_list is not None + ), f"Invalid type in expect {type(observable.sites).__name__}" - assert len(sites_list) < 3, "Only one- and two-site observables are currently implemented." + assert ( + len(sites_list) < 3 + ), "Only one- and two-site observables are currently implemented." for s in sites_list: - assert s in range(self.length), f"Observable acting on non-existing site: {s}" + assert s in range( + self.length + ), f"Observable acting on non-existing site: {s}" exp = self.local_expect(observable, sites_list) - assert exp.imag < 1e-13, f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." + assert ( + exp.imag < 1e-13 + ), f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." 
return exp.real def measure_single_shot(self) -> int: @@ -758,7 +828,9 @@ def measure_single_shot(self) -> int: temp_state = copy.deepcopy(self) bitstring = [] for site, tensor in enumerate(temp_state.tensors): - reduced_density_matrix = oe.contract("abc, dbc->ad", tensor, np.conj(tensor)) + reduced_density_matrix = oe.contract( + "abc, dbc->ad", tensor, np.conj(tensor) + ) probabilities = np.diag(reduced_density_matrix).real rng = np.random.default_rng() chosen_index = rng.choice(len(probabilities), p=probabilities) @@ -772,7 +844,9 @@ def measure_single_shot(self) -> int: temp_state.tensors[site + 1] = ( # noqa: B909 1 / np.sqrt(probabilities[chosen_index]) - * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) + * oe.contract( + "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] + ) ) return sum(c << i for i, c in enumerate(bitstring)) @@ -797,10 +871,14 @@ def measure_shots(self, shots: int) -> dict[int, int]: if shots > 1: max_workers = max(1, multiprocessing.cpu_count() - 1) with ( - concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor, + concurrent.futures.ProcessPoolExecutor( + max_workers=max_workers + ) as executor, tqdm(total=shots, desc="Measuring shots", ncols=80) as pbar, ): - futures = [executor.submit(self.measure_single_shot) for _ in range(shots)] + futures = [ + executor.submit(self.measure_single_shot) for _ in range(shots) + ] for future in concurrent.futures.as_completed(futures): result = future.result() results[result] = results.get(result, 0) + 1 @@ -824,7 +902,9 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: Returns: float: Probability of obtaining the given bitstring under projective measurement. """ - assert len(bitstring) == self.length, "Bitstring length must match number of sites" + assert ( + len(bitstring) == self.length + ), "Bitstring length must match number of sites" temp_state = copy.deepcopy(self) total_norm = 1.0 @@ -832,7 +912,9 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: state_index = int(char) tensor = temp_state.tensors[site] local_dim = self.physical_dimensions[site] - assert 0 <= state_index < local_dim, f"Invalid state index {state_index} at site {site}" + assert ( + 0 <= state_index < local_dim + ), f"Invalid state index {state_index} at site {site}" selected_state = np.zeros(local_dim) selected_state[state_index] = 1 @@ -849,7 +931,11 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: # Normalize and propagate if site != self.length - 1: temp_state.tensors[site + 1] = ( - 1 / norm * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) + 1 + / norm + * oe.contract( + "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] + ) ) return np.complex128(total_norm**2) @@ -1366,13 +1452,20 @@ def bose_hubbard( tensor[3, 3] = id_boson # build the full tensor list - tensors = [np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) for _ in range(length)] + tensors = [ + np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) + for _ in range(length) + ] # Left boundary: take only row 0 - tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype(np.complex128) + tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype( + np.complex128 + ) # Right boundary: take only col 3 - tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype(np.complex128) + tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype( 
+ np.complex128 + ) mpo = cls() mpo.tensors = tensors @@ -1430,7 +1523,9 @@ def finite_state_machine( self.length = len(self.tensors) self.physical_dimension = self.tensors[0].shape[0] - def custom(self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True) -> None: + def custom( + self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True + ) -> None: """Custom MPO from tensors. Initialize the custom MPO (Matrix Product Operator) with the given tensors. @@ -1498,7 +1593,9 @@ def from_pauli_sum( self.physical_dimension = physical_dimension if not terms: - self.tensors = [np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length)] + self.tensors = [ + np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length) + ] return # 1. Parse terms into dense lists of operator names. @@ -1532,7 +1629,9 @@ def from_pauli_sum( term_trajectories[t_idx][length] = 0 # bond_state_maps[i] stores the mapping: (Op_str, Next_State_ID) -> Current_State_ID - bond_state_maps: list[dict[tuple[str, int], int]] = [{} for _ in range(length + 1)] + bond_state_maps: list[dict[tuple[str, int], int]] = [ + {} for _ in range(length + 1) + ] # Sweep Right-to-Left (sites L-1 down to 1) to build the FSM transitions. # We stop at bond 1. Bond 0 is always the single "Start" state. @@ -1598,7 +1697,9 @@ def from_pauli_sum( # The FSM construction is optimal for one-sided (suffix) uniqueness. # A standard two-sweep compression ("lr_rl") puts the MPO in canonical form # and removes any remaining redundancies (e.g., common prefixes). - self.compress(tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl") + self.compress( + tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl" + ) assert self.check_if_valid_mpo(), "MPO initialized wrong" def compress( @@ -1647,9 +1748,13 @@ def compress( for _ in range(n_sweeps): for direction in schedule: - self._compress_one_sweep(direction=direction, tol=tol, max_bond_dim=max_bond_dim) + self._compress_one_sweep( + direction=direction, tol=tol, max_bond_dim=max_bond_dim + ) - def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | None) -> None: + def _compress_one_sweep( + self, *, direction: str, tol: float, max_bond_dim: int | None + ) -> None: """Run one in-place MPO SVD compression sweep in the given direction. Args: @@ -1699,7 +1804,9 @@ def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | vh = vh[:keep, :] # Left tensor: (bond_dim_left, d, d, keep) -> (d, d, bond_dim_left, keep) - left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose(1, 2, 0, 3) + left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose( + 1, 2, 0, 3 + ) # Right tensor: (keep, d, d, bond_dim_right) -> (d, d, keep, bond_dim_right) svh = (s[:, None] * vh).reshape(keep, phys_dim, phys_dim, bond_dim_right) @@ -1779,93 +1886,89 @@ def to_matrix(self) -> NDArray[np.complex128]: def from_matrix( cls, mat: np.ndarray, - d_in: int, - d_out: int, - max_bond: Optional[int] = None, + d: int, + max_bond: int | None = None, cutoff: float = 0.0, - dtype=np.complex128, ) -> MPO: - """Factorize a dense matrix into an MPO with uniform local dimensions. + """ + Factorize a dense matrix into an MPO with uniform local dimension d. - Each site has shape (d_in, d_out). The number of sites n is inferred from + Each site has shape (d, d). 
The number of sites n is inferred from - mat.shape = (d_in**n, d_out**n) + mat.shape = (d**n, d**n) Parameters ---------- mat : np.ndarray - Matrix of shape (d_in**n, d_out**n). - d_in : int - Input physical dimension per site. - d_out : int - Output physical dimension per site. + Square matrix of shape (d**n, d**n). + d : int + Physical dimension per site. max_bond : Optional[int] Max bond dimension. cutoff : float Singular values <= cutoff are discarded. - dtype : np.dtype - Tensor dtype. """ + rows, cols = mat.shape - # --- infer length --- - n_rows = np.log(rows) / np.log(d_in) - n_cols = np.log(cols) / np.log(d_out) + if rows != cols: + raise ValueError("Matrix must be square for uniform MPO factorization.") - n = round(n_rows) + # --- infer chain length --- + n_float = np.log(rows) / np.log(d) + n = int(round(n_float)) - if not (np.isclose(n_rows, n) and np.isclose(n_cols, n)): - msg = f"Matrix dimensions are not compatible with uniform (d_in={d_in}, d_out={d_out}) MPO." - raise ValueError(msg) + if not np.isclose(n_float, n): + raise ValueError(f"Matrix dimension {rows} is not a power of d={d}.") - M = np.asarray(mat, dtype=dtype, order="F") + M = np.asarray(mat, dtype=np.complex128, order="F") left_rank = 1 rem = M.reshape(1, rows, cols) - tensors: List[np.ndarray] = [] + tensors: list[np.ndarray] = [] def _truncate(s: np.ndarray) -> int: r = s.size if cutoff > 0.0: - r = int(np.sum(s > cutoff)) - r = max(r, 1) + r = max(int(np.sum(s > cutoff)), 1) if max_bond is not None: r = min(r, max_bond) return r - # identical SVD sweep as before + # SVD sweep for k in range(n - 1): - rest_rows = d_in ** (n - k - 1) - rest_cols = d_out ** (n - k - 1) + rest = d ** (n - k - 1) - rem = rem.reshape(left_rank, d_in, rest_rows, d_out, rest_cols) + rem = rem.reshape(left_rank, d, rest, d, rest) rem_perm = np.transpose(rem, (1, 3, 0, 2, 4)) - X = rem_perm.reshape(d_in * d_out * left_rank, rest_rows * rest_cols) + X = rem_perm.reshape(d * d * left_rank, rest * rest) U, s, Vh = np.linalg.svd(X, full_matrices=False) r_keep = _truncate(s) + U = U[:, :r_keep] s = s[:r_keep] Vh = Vh[:r_keep, :] - T_k = U.reshape(d_in, d_out, left_rank, r_keep).astype(dtype, copy=False) + T_k = U.reshape(d, d, left_rank, r_keep) tensors.append(T_k) - rem = (s[:, None] * Vh).reshape(r_keep, rest_rows, rest_cols) + rem = (s[:, None] * Vh).reshape(r_keep, rest, rest) left_rank = r_keep - rem = rem.reshape(left_rank, d_in, d_out) + # last site + rem = rem.reshape(left_rank, d, d) - T_last = np.transpose(rem, (1, 2, 0)).reshape(d_in, d_out, left_rank, 1).astype(dtype, copy=False) + T_last = np.transpose(rem, (1, 2, 0)).reshape(d, d, left_rank, 1) tensors.append(T_last) mpo = cls() mpo.tensors = tensors mpo.length = n - mpo.physical_dimension = (d_in, d_out) + mpo.physical_dimension = d assert mpo.check_if_valid_mpo(), "MPO initialized wrong" diff --git a/tests/core/data_structures/test_networks.py b/tests/core/data_structures/test_networks.py index 0ea7f987..bbd1eb45 100644 --- a/tests/core/data_structures/test_networks.py +++ b/tests/core/data_structures/test_networks.py @@ -399,20 +399,29 @@ def test_from_matrix() -> None: This test constructs a dense Bose-Hubbard Hamiltonian and creates an MPO via from_matrix(). This is converted back via to_matrix and is compared to the original. The same is done for a random - matrix at maximal bond dimension. + matrix at maximal bond dimension and close-to maximal bond dimension. 
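    A minimal sketch of the round-trip being exercised (it mirrors the assertions in
    the test body; _bose_hubbard_dense is the dense helper defined at the top of this
    test module):

        d, length = 3, 5
        H = _bose_hubbard_dense(length, d, 0.9, 0.6, 0.2)  # dense (d**length, d**length) matrix
        mpo = MPO.from_matrix(H, d, 4)                     # factorize; exact at bond dimension 4
        assert np.allclose(H, mpo.to_matrix())             # reconstruction matches the input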
""" length = 5 # local dimension d = 3 H = _bose_hubbard_dense(length, d, 0.9, 0.6, 0.2) # exact down to bond dimension 4 - Hmpo = MPO.from_matrix(H, d, d, 4) + Hmpo = MPO.from_matrix(H, d, 4) assert np.allclose(H, Hmpo.to_matrix()) - H = np.random.rand(d**length, d**length) - Hmpo = MPO.from_matrix(H, d, d, 1000000) + H = np.random.rand(d**length, d**length) + 1j * np.random.rand( + d**length, d ** (length) + ) + Hmpo = MPO.from_matrix(H, d, 1000000) assert np.allclose(H, Hmpo.to_matrix()) + length = 6 + H = np.random.rand(d**length, d ** (length)) + 1j * np.random.rand( + d**length, d ** (length) + ) + Hmpo = MPO.from_matrix(H, d, 728) + assert np.max(np.abs(H - Hmpo.to_matrix())) < 1e-2 + def test_to_mps() -> None: """Test converting an MPO to an MPS. From ec05fd27e04e976b103835bc22aa8dfb901399ba Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 13 Feb 2026 15:08:25 +0000 Subject: [PATCH 06/25] =?UTF-8?q?=F0=9F=8E=A8=20pre-commit=20fixes?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/mqt/yaqs/core/data_structures/networks.py | 227 +++++------------- tests/core/data_structures/test_networks.py | 8 +- 2 files changed, 62 insertions(+), 173 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 47c1d254..dfe9953a 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -174,9 +174,7 @@ def __init__( vector[0] = rng.random() vector[1] = 1 - vector[0] elif state == "basis": - assert ( - basis_string is not None - ), "basis_string must be provided for 'basis' state initialization." + assert basis_string is not None, "basis_string must be provided for 'basis' state initialization." self.init_mps_from_basis(basis_string, self.physical_dimensions) break else: @@ -193,9 +191,7 @@ def __init__( if pad is not None: self.pad_bond_dimension(pad) - def init_mps_from_basis( - self, basis_string: str, physical_dimensions: list[int] - ) -> None: + def init_mps_from_basis(self, basis_string: str, physical_dimensions: list[int]) -> None: """Initialize a list of MPS tensors representing a product state from a basis string. Args: @@ -354,12 +350,8 @@ def get_schmidt_spectrum(self, sites: list[int]) -> NDArray[np.float64]: NDArray[np.float64]: The Schmidt spectrum (length 500), with unused entries filled with NaN. """ - assert ( - len(sites) == 2 - ), "Schmidt spectrum is defined on a bond (two adjacent sites)." - assert ( - sites[0] + 1 == sites[1] - ), "Schmidt spectrum only defined for nearest-neighbor cut." + assert len(sites) == 2, "Schmidt spectrum is defined on a bond (two adjacent sites)." + assert sites[0] + 1 == sites[1], "Schmidt spectrum only defined for nearest-neighbor cut." top_schmidt_vals = 500 i, j = sites a, b = self.tensors[i], self.tensors[j] @@ -415,9 +407,7 @@ def almost_equal(self, other: MPS) -> bool: return False return True - def shift_orthogonality_center_right( - self, current_orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def shift_orthogonality_center_right(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: """Shifts orthogonality center right. 
This function performs a QR decomposition to shift the known current center to the right and move @@ -451,9 +441,7 @@ def shift_orthogonality_center_right( self.tensors[current_orthogonality_center + 1], ) = (a_new, b_new) - def shift_orthogonality_center_left( - self, current_orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def shift_orthogonality_center_left(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: """Shifts orthogonality center left. This function flips the network, performs a right shift, then flips the network again. @@ -464,14 +452,10 @@ def shift_orthogonality_center_left( Default is QR. """ self.flip_network() - self.shift_orthogonality_center_right( - self.length - current_orthogonality_center - 1, decomposition - ) + self.shift_orthogonality_center_right(self.length - current_orthogonality_center - 1, decomposition) self.flip_network() - def set_canonical_form( - self, orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR") -> None: """Sets canonical form of MPS. Left and right normalizes an MPS around a selected site. @@ -482,9 +466,7 @@ def set_canonical_form( decomposition: Type of decomposition. Default QR. """ - def sweep_decomposition( - orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def sweep_decomposition(orthogonality_center: int, decomposition: str = "QR") -> None: for site, _ in enumerate(self.tensors): if site == orthogonality_center: break @@ -515,17 +497,13 @@ def normalize(self, form: str = "B", decomposition: str = "QR") -> None: if form == "B": self.flip_network() - self.set_canonical_form( - orthogonality_center=self.length - 1, decomposition=decomposition - ) + self.set_canonical_form(orthogonality_center=self.length - 1, decomposition=decomposition) self.shift_orthogonality_center_right(self.length - 1, decomposition) if form == "B": self.flip_network() - def truncate( - self, threshold: float = 1e-12, max_bond_dim: int | None = None - ) -> None: + def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> None: """In-place MPS truncation via repeated two-site SVDs.""" orth_center = self.check_canonical_form()[0] if self.length == 1: @@ -547,9 +525,7 @@ def truncate( self.flip_network() - def scalar_product( - self, other: MPS, sites: int | list[int] | None = None - ) -> np.complex128: + def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np.complex128: """Compute the scalar (inner) product between two Matrix Product States (MPS). The function contracts the corresponding tensors of two MPS objects. 
If no specific site is @@ -576,12 +552,8 @@ def scalar_product( result = None for idx in range(self.length): # contract at each site into a 4-leg tensor - theta = oe.contract( - "abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx] - ) - result = ( - theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) - ) + theta = oe.contract("abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx]) + result = theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) # squeeze down to scalar assert result is not None return np.complex128(np.squeeze(result)) @@ -613,9 +585,7 @@ def scalar_product( msg = f"Invalid `sites` argument: {sites!r}" raise ValueError(msg) - def local_expect( - self, operator: Observable, sites: int | list[int] - ) -> np.complex128: + def local_expect(self, operator: Observable, sites: int | list[int]) -> np.complex128: """Compute the local expectation value of an operator on an MPS. The function applies the given operator to the tensor at the specified site of a deep copy of the @@ -641,17 +611,11 @@ def local_expect( i = sites if isinstance(operator.sites, list): - assert ( - operator.sites[0] == i - ), f"Operator sites mismatch {operator.sites[0]}, {i}" + assert operator.sites[0] == i, f"Operator sites mismatch {operator.sites[0]}, {i}" elif isinstance(operator.sites, int): - assert ( - operator.sites == i - ), f"Operator sites mismatch {operator.sites}, {i}" + assert operator.sites == i, f"Operator sites mismatch {operator.sites}, {i}" - assert ( - i is not None - ), f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" + assert i is not None, f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" a = temp_state.tensors[i] temp_state.tensors[i] = oe.contract("ab, bcd->acd", operator.gate.matrix, a) @@ -662,12 +626,10 @@ def local_expect( assert operator.sites[0] == i, "Observable sites mismatch" assert operator.sites[1] == j, "Observable sites mismatch" - assert ( - operator.sites[0] < operator.sites[1] - ), "Observable sites must be in ascending order." - assert ( - operator.sites[1] - operator.sites[0] == 1 - ), "Only nearest-neighbor observables are currently implemented." + assert operator.sites[0] < operator.sites[1], "Observable sites must be in ascending order." + assert operator.sites[1] - operator.sites[0] == 1, ( + "Only nearest-neighbor observables are currently implemented." 
+ ) a = temp_state.tensors[i] b = temp_state.tensors[j] d_i, left, _ = a.shape @@ -679,9 +641,7 @@ def local_expect( theta = theta.reshape(left, d_i * d_j, right) # (l, d_i*d_j, r) # 2) apply operator on the combined phys index - theta = oe.contract( - "ab, cbd->cad", operator.gate.matrix, theta - ) # (l, d_i*d_j, r) + theta = oe.contract("ab, cbd->cad", operator.gate.matrix, theta) # (l, d_i*d_j, r) theta = theta.reshape(left, d_i, d_j, right) # back to (l, d_i, d_j, r) # 3) split via SVD @@ -694,9 +654,7 @@ def local_expect( u_tensor = u_mat.reshape(left, d_i, chi_new) # (l, d_i, r_new) a_new = u_tensor.transpose(1, 0, 2) # → (d_i, l, r_new) - v_tensor = (np.diag(s_vec) @ v_mat).reshape( - chi_new, d_j, right - ) # (l_new, d_j, r) + v_tensor = (np.diag(s_vec) @ v_mat).reshape(chi_new, d_j, right) # (l_new, d_j, r) b_new = v_tensor.transpose(1, 0, 2) # → (d_j, l_new, r) temp_state.tensors[i] = a_new @@ -732,44 +690,24 @@ def evaluate_observables( elif observable.gate.name == "total_bond": results[obs_index, column_index] = self.get_total_bond() elif observable.gate.name in {"entropy", "schmidt_spectrum"}: - assert isinstance( - observable.sites, list - ), "Given metric requires a list of sites" - assert ( - len(observable.sites) == 2 - ), "Given metric requires 2 sites to act on." + assert isinstance(observable.sites, list), "Given metric requires a list of sites" + assert len(observable.sites) == 2, "Given metric requires 2 sites to act on." max_site = max(observable.sites) min_site = min(observable.sites) - assert ( - max_site - min_site == 1 - ), "Entropy and Schmidt cuts must be nearest neighbor." + assert max_site - min_site == 1, "Entropy and Schmidt cuts must be nearest neighbor." for s in observable.sites: - assert s in range( - self.length - ), f"Observable acting on non-existing site: {s}" + assert s in range(self.length), f"Observable acting on non-existing site: {s}" if observable.gate.name == "entropy": - results[obs_index, column_index] = self.get_entropy( - observable.sites - ) + results[obs_index, column_index] = self.get_entropy(observable.sites) elif observable.gate.name == "schmidt_spectrum": - results[obs_index, column_index] = self.get_schmidt_spectrum( - observable.sites - ) + results[obs_index, column_index] = self.get_schmidt_spectrum(observable.sites) elif observable.gate.name == "pvm": - assert hasattr( - observable.gate, "bitstring" - ), "Gate does not have attribute bitstring." - results[obs_index, column_index] = self.project_onto_bitstring( - observable.gate.bitstring - ) + assert hasattr(observable.gate, "bitstring"), "Gate does not have attribute bitstring." + results[obs_index, column_index] = self.project_onto_bitstring(observable.gate.bitstring) else: - idx = ( - observable.sites[0] - if isinstance(observable.sites, list) - else observable.sites - ) + idx = observable.sites[0] if isinstance(observable.sites, list) else observable.sites if idx > last_site: for site in range(last_site, idx): temp_state.shift_orthogonality_center_right(site) @@ -794,24 +732,16 @@ def expect(self, observable: Observable) -> np.float64: elif isinstance(observable.sites, list): sites_list = observable.sites - assert ( - sites_list is not None - ), f"Invalid type in expect {type(observable.sites).__name__}" + assert sites_list is not None, f"Invalid type in expect {type(observable.sites).__name__}" - assert ( - len(sites_list) < 3 - ), "Only one- and two-site observables are currently implemented." 
+ assert len(sites_list) < 3, "Only one- and two-site observables are currently implemented." for s in sites_list: - assert s in range( - self.length - ), f"Observable acting on non-existing site: {s}" + assert s in range(self.length), f"Observable acting on non-existing site: {s}" exp = self.local_expect(observable, sites_list) - assert ( - exp.imag < 1e-13 - ), f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." + assert exp.imag < 1e-13, f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." return exp.real def measure_single_shot(self) -> int: @@ -828,9 +758,7 @@ def measure_single_shot(self) -> int: temp_state = copy.deepcopy(self) bitstring = [] for site, tensor in enumerate(temp_state.tensors): - reduced_density_matrix = oe.contract( - "abc, dbc->ad", tensor, np.conj(tensor) - ) + reduced_density_matrix = oe.contract("abc, dbc->ad", tensor, np.conj(tensor)) probabilities = np.diag(reduced_density_matrix).real rng = np.random.default_rng() chosen_index = rng.choice(len(probabilities), p=probabilities) @@ -844,9 +772,7 @@ def measure_single_shot(self) -> int: temp_state.tensors[site + 1] = ( # noqa: B909 1 / np.sqrt(probabilities[chosen_index]) - * oe.contract( - "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] - ) + * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) ) return sum(c << i for i, c in enumerate(bitstring)) @@ -871,14 +797,10 @@ def measure_shots(self, shots: int) -> dict[int, int]: if shots > 1: max_workers = max(1, multiprocessing.cpu_count() - 1) with ( - concurrent.futures.ProcessPoolExecutor( - max_workers=max_workers - ) as executor, + concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor, tqdm(total=shots, desc="Measuring shots", ncols=80) as pbar, ): - futures = [ - executor.submit(self.measure_single_shot) for _ in range(shots) - ] + futures = [executor.submit(self.measure_single_shot) for _ in range(shots)] for future in concurrent.futures.as_completed(futures): result = future.result() results[result] = results.get(result, 0) + 1 @@ -902,9 +824,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: Returns: float: Probability of obtaining the given bitstring under projective measurement. 
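        Example (minimal sketch using the MPS class defined in this module; assumes a
        three-site qubit register in the product state (|0> + |1>)/sqrt(2) per site):

            mps = MPS(length=3, state="x+")
            p = mps.project_onto_bitstring("000")  # expected to be close to (1/2)**3 = 0.125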
""" - assert ( - len(bitstring) == self.length - ), "Bitstring length must match number of sites" + assert len(bitstring) == self.length, "Bitstring length must match number of sites" temp_state = copy.deepcopy(self) total_norm = 1.0 @@ -912,9 +832,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: state_index = int(char) tensor = temp_state.tensors[site] local_dim = self.physical_dimensions[site] - assert ( - 0 <= state_index < local_dim - ), f"Invalid state index {state_index} at site {site}" + assert 0 <= state_index < local_dim, f"Invalid state index {state_index} at site {site}" selected_state = np.zeros(local_dim) selected_state[state_index] = 1 @@ -931,11 +849,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: # Normalize and propagate if site != self.length - 1: temp_state.tensors[site + 1] = ( - 1 - / norm - * oe.contract( - "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] - ) + 1 / norm * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) ) return np.complex128(total_norm**2) @@ -1452,20 +1366,13 @@ def bose_hubbard( tensor[3, 3] = id_boson # build the full tensor list - tensors = [ - np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) - for _ in range(length) - ] + tensors = [np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) for _ in range(length)] # Left boundary: take only row 0 - tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype( - np.complex128 - ) + tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype(np.complex128) # Right boundary: take only col 3 - tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype( - np.complex128 - ) + tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype(np.complex128) mpo = cls() mpo.tensors = tensors @@ -1523,9 +1430,7 @@ def finite_state_machine( self.length = len(self.tensors) self.physical_dimension = self.tensors[0].shape[0] - def custom( - self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True - ) -> None: + def custom(self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True) -> None: """Custom MPO from tensors. Initialize the custom MPO (Matrix Product Operator) with the given tensors. @@ -1593,9 +1498,7 @@ def from_pauli_sum( self.physical_dimension = physical_dimension if not terms: - self.tensors = [ - np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length) - ] + self.tensors = [np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length)] return # 1. Parse terms into dense lists of operator names. @@ -1629,9 +1532,7 @@ def from_pauli_sum( term_trajectories[t_idx][length] = 0 # bond_state_maps[i] stores the mapping: (Op_str, Next_State_ID) -> Current_State_ID - bond_state_maps: list[dict[tuple[str, int], int]] = [ - {} for _ in range(length + 1) - ] + bond_state_maps: list[dict[tuple[str, int], int]] = [{} for _ in range(length + 1)] # Sweep Right-to-Left (sites L-1 down to 1) to build the FSM transitions. # We stop at bond 1. Bond 0 is always the single "Start" state. @@ -1697,9 +1598,7 @@ def from_pauli_sum( # The FSM construction is optimal for one-sided (suffix) uniqueness. # A standard two-sweep compression ("lr_rl") puts the MPO in canonical form # and removes any remaining redundancies (e.g., common prefixes). 
- self.compress( - tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl" - ) + self.compress(tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl") assert self.check_if_valid_mpo(), "MPO initialized wrong" def compress( @@ -1748,13 +1647,9 @@ def compress( for _ in range(n_sweeps): for direction in schedule: - self._compress_one_sweep( - direction=direction, tol=tol, max_bond_dim=max_bond_dim - ) + self._compress_one_sweep(direction=direction, tol=tol, max_bond_dim=max_bond_dim) - def _compress_one_sweep( - self, *, direction: str, tol: float, max_bond_dim: int | None - ) -> None: + def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | None) -> None: """Run one in-place MPO SVD compression sweep in the given direction. Args: @@ -1804,9 +1699,7 @@ def _compress_one_sweep( vh = vh[:keep, :] # Left tensor: (bond_dim_left, d, d, keep) -> (d, d, bond_dim_left, keep) - left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose( - 1, 2, 0, 3 - ) + left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose(1, 2, 0, 3) # Right tensor: (keep, d, d, bond_dim_right) -> (d, d, keep, bond_dim_right) svh = (s[:, None] * vh).reshape(keep, phys_dim, phys_dim, bond_dim_right) @@ -1890,8 +1783,7 @@ def from_matrix( max_bond: int | None = None, cutoff: float = 0.0, ) -> MPO: - """ - Factorize a dense matrix into an MPO with uniform local dimension d. + """Factorize a dense matrix into an MPO with uniform local dimension d. Each site has shape (d, d). The number of sites n is inferred from @@ -1908,18 +1800,19 @@ def from_matrix( cutoff : float Singular values <= cutoff are discarded. """ - rows, cols = mat.shape if rows != cols: - raise ValueError("Matrix must be square for uniform MPO factorization.") + msg = "Matrix must be square for uniform MPO factorization." + raise ValueError(msg) # --- infer chain length --- n_float = np.log(rows) / np.log(d) - n = int(round(n_float)) + n = round(n_float) if not np.isclose(n_float, n): - raise ValueError(f"Matrix dimension {rows} is not a power of d={d}.") + msg = f"Matrix dimension {rows} is not a power of d={d}." 
+ raise ValueError(msg) M = np.asarray(mat, dtype=np.complex128, order="F") diff --git a/tests/core/data_structures/test_networks.py b/tests/core/data_structures/test_networks.py index 17f6d28b..ab738266 100644 --- a/tests/core/data_structures/test_networks.py +++ b/tests/core/data_structures/test_networks.py @@ -401,16 +401,12 @@ def test_from_matrix() -> None: Hmpo = MPO.from_matrix(H, d, 4) assert np.allclose(H, Hmpo.to_matrix()) - H = np.random.rand(d**length, d**length) + 1j * np.random.rand( - d**length, d ** (length) - ) + H = np.random.rand(d**length, d**length) + 1j * np.random.rand(d**length, d ** (length)) Hmpo = MPO.from_matrix(H, d, 1000000) assert np.allclose(H, Hmpo.to_matrix()) length = 6 - H = np.random.rand(d**length, d ** (length)) + 1j * np.random.rand( - d**length, d ** (length) - ) + H = np.random.rand(d**length, d ** (length)) + 1j * np.random.rand(d**length, d ** (length)) Hmpo = MPO.from_matrix(H, d, 728) assert np.max(np.abs(H - Hmpo.to_matrix())) < 1e-2 From 8b3cc635a8a8b9ba43563cb7e4c41c830d9c0511 Mon Sep 17 00:00:00 2001 From: Lukas Date: Fri, 13 Feb 2026 17:18:47 +0100 Subject: [PATCH 07/25] removed Frotran order from from_matrix() --- src/mqt/yaqs/core/data_structures/networks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 47c1d254..20a3ea4b 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -1921,10 +1921,10 @@ def from_matrix( if not np.isclose(n_float, n): raise ValueError(f"Matrix dimension {rows} is not a power of d={d}.") - M = np.asarray(mat, dtype=np.complex128, order="F") + mat = np.asarray(mat, dtype=np.complex128) left_rank = 1 - rem = M.reshape(1, rows, cols) + rem = mat.reshape(1, rows, cols) tensors: list[np.ndarray] = [] From 2cf73d88894305282de946f8f688baa8583cbabc Mon Sep 17 00:00:00 2001 From: Lukas Date: Fri, 13 Feb 2026 17:21:00 +0100 Subject: [PATCH 08/25] removed Frotran order from from_matrix() --- src/mqt/yaqs/core/data_structures/networks.py | 215 +++++++++++++----- 1 file changed, 161 insertions(+), 54 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 428f3e51..9a9f992a 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -174,7 +174,9 @@ def __init__( vector[0] = rng.random() vector[1] = 1 - vector[0] elif state == "basis": - assert basis_string is not None, "basis_string must be provided for 'basis' state initialization." + assert ( + basis_string is not None + ), "basis_string must be provided for 'basis' state initialization." self.init_mps_from_basis(basis_string, self.physical_dimensions) break else: @@ -191,7 +193,9 @@ def __init__( if pad is not None: self.pad_bond_dimension(pad) - def init_mps_from_basis(self, basis_string: str, physical_dimensions: list[int]) -> None: + def init_mps_from_basis( + self, basis_string: str, physical_dimensions: list[int] + ) -> None: """Initialize a list of MPS tensors representing a product state from a basis string. Args: @@ -350,8 +354,12 @@ def get_schmidt_spectrum(self, sites: list[int]) -> NDArray[np.float64]: NDArray[np.float64]: The Schmidt spectrum (length 500), with unused entries filled with NaN. """ - assert len(sites) == 2, "Schmidt spectrum is defined on a bond (two adjacent sites)." 
- assert sites[0] + 1 == sites[1], "Schmidt spectrum only defined for nearest-neighbor cut." + assert ( + len(sites) == 2 + ), "Schmidt spectrum is defined on a bond (two adjacent sites)." + assert ( + sites[0] + 1 == sites[1] + ), "Schmidt spectrum only defined for nearest-neighbor cut." top_schmidt_vals = 500 i, j = sites a, b = self.tensors[i], self.tensors[j] @@ -407,7 +415,9 @@ def almost_equal(self, other: MPS) -> bool: return False return True - def shift_orthogonality_center_right(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: + def shift_orthogonality_center_right( + self, current_orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Shifts orthogonality center right. This function performs a QR decomposition to shift the known current center to the right and move @@ -441,7 +451,9 @@ def shift_orthogonality_center_right(self, current_orthogonality_center: int, de self.tensors[current_orthogonality_center + 1], ) = (a_new, b_new) - def shift_orthogonality_center_left(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: + def shift_orthogonality_center_left( + self, current_orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Shifts orthogonality center left. This function flips the network, performs a right shift, then flips the network again. @@ -452,10 +464,14 @@ def shift_orthogonality_center_left(self, current_orthogonality_center: int, dec Default is QR. """ self.flip_network() - self.shift_orthogonality_center_right(self.length - current_orthogonality_center - 1, decomposition) + self.shift_orthogonality_center_right( + self.length - current_orthogonality_center - 1, decomposition + ) self.flip_network() - def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR") -> None: + def set_canonical_form( + self, orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Sets canonical form of MPS. Left and right normalizes an MPS around a selected site. @@ -466,7 +482,9 @@ def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR decomposition: Type of decomposition. Default QR. 
""" - def sweep_decomposition(orthogonality_center: int, decomposition: str = "QR") -> None: + def sweep_decomposition( + orthogonality_center: int, decomposition: str = "QR" + ) -> None: for site, _ in enumerate(self.tensors): if site == orthogonality_center: break @@ -497,13 +515,17 @@ def normalize(self, form: str = "B", decomposition: str = "QR") -> None: if form == "B": self.flip_network() - self.set_canonical_form(orthogonality_center=self.length - 1, decomposition=decomposition) + self.set_canonical_form( + orthogonality_center=self.length - 1, decomposition=decomposition + ) self.shift_orthogonality_center_right(self.length - 1, decomposition) if form == "B": self.flip_network() - def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> None: + def truncate( + self, threshold: float = 1e-12, max_bond_dim: int | None = None + ) -> None: """In-place MPS truncation via repeated two-site SVDs.""" orth_center = self.check_canonical_form()[0] if self.length == 1: @@ -525,7 +547,9 @@ def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> self.flip_network() - def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np.complex128: + def scalar_product( + self, other: MPS, sites: int | list[int] | None = None + ) -> np.complex128: """Compute the scalar (inner) product between two Matrix Product States (MPS). The function contracts the corresponding tensors of two MPS objects. If no specific site is @@ -552,8 +576,12 @@ def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np result = None for idx in range(self.length): # contract at each site into a 4-leg tensor - theta = oe.contract("abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx]) - result = theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) + theta = oe.contract( + "abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx] + ) + result = ( + theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) + ) # squeeze down to scalar assert result is not None return np.complex128(np.squeeze(result)) @@ -585,7 +613,9 @@ def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np msg = f"Invalid `sites` argument: {sites!r}" raise ValueError(msg) - def local_expect(self, operator: Observable, sites: int | list[int]) -> np.complex128: + def local_expect( + self, operator: Observable, sites: int | list[int] + ) -> np.complex128: """Compute the local expectation value of an operator on an MPS. 
The function applies the given operator to the tensor at the specified site of a deep copy of the @@ -611,11 +641,17 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl i = sites if isinstance(operator.sites, list): - assert operator.sites[0] == i, f"Operator sites mismatch {operator.sites[0]}, {i}" + assert ( + operator.sites[0] == i + ), f"Operator sites mismatch {operator.sites[0]}, {i}" elif isinstance(operator.sites, int): - assert operator.sites == i, f"Operator sites mismatch {operator.sites}, {i}" + assert ( + operator.sites == i + ), f"Operator sites mismatch {operator.sites}, {i}" - assert i is not None, f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" + assert ( + i is not None + ), f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" a = temp_state.tensors[i] temp_state.tensors[i] = oe.contract("ab, bcd->acd", operator.gate.matrix, a) @@ -626,10 +662,12 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl assert operator.sites[0] == i, "Observable sites mismatch" assert operator.sites[1] == j, "Observable sites mismatch" - assert operator.sites[0] < operator.sites[1], "Observable sites must be in ascending order." - assert operator.sites[1] - operator.sites[0] == 1, ( - "Only nearest-neighbor observables are currently implemented." - ) + assert ( + operator.sites[0] < operator.sites[1] + ), "Observable sites must be in ascending order." + assert ( + operator.sites[1] - operator.sites[0] == 1 + ), "Only nearest-neighbor observables are currently implemented." a = temp_state.tensors[i] b = temp_state.tensors[j] d_i, left, _ = a.shape @@ -641,7 +679,9 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl theta = theta.reshape(left, d_i * d_j, right) # (l, d_i*d_j, r) # 2) apply operator on the combined phys index - theta = oe.contract("ab, cbd->cad", operator.gate.matrix, theta) # (l, d_i*d_j, r) + theta = oe.contract( + "ab, cbd->cad", operator.gate.matrix, theta + ) # (l, d_i*d_j, r) theta = theta.reshape(left, d_i, d_j, right) # back to (l, d_i, d_j, r) # 3) split via SVD @@ -654,7 +694,9 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl u_tensor = u_mat.reshape(left, d_i, chi_new) # (l, d_i, r_new) a_new = u_tensor.transpose(1, 0, 2) # → (d_i, l, r_new) - v_tensor = (np.diag(s_vec) @ v_mat).reshape(chi_new, d_j, right) # (l_new, d_j, r) + v_tensor = (np.diag(s_vec) @ v_mat).reshape( + chi_new, d_j, right + ) # (l_new, d_j, r) b_new = v_tensor.transpose(1, 0, 2) # → (d_j, l_new, r) temp_state.tensors[i] = a_new @@ -690,24 +732,44 @@ def evaluate_observables( elif observable.gate.name == "total_bond": results[obs_index, column_index] = self.get_total_bond() elif observable.gate.name in {"entropy", "schmidt_spectrum"}: - assert isinstance(observable.sites, list), "Given metric requires a list of sites" - assert len(observable.sites) == 2, "Given metric requires 2 sites to act on." + assert isinstance( + observable.sites, list + ), "Given metric requires a list of sites" + assert ( + len(observable.sites) == 2 + ), "Given metric requires 2 sites to act on." max_site = max(observable.sites) min_site = min(observable.sites) - assert max_site - min_site == 1, "Entropy and Schmidt cuts must be nearest neighbor." + assert ( + max_site - min_site == 1 + ), "Entropy and Schmidt cuts must be nearest neighbor." 
for s in observable.sites: - assert s in range(self.length), f"Observable acting on non-existing site: {s}" + assert s in range( + self.length + ), f"Observable acting on non-existing site: {s}" if observable.gate.name == "entropy": - results[obs_index, column_index] = self.get_entropy(observable.sites) + results[obs_index, column_index] = self.get_entropy( + observable.sites + ) elif observable.gate.name == "schmidt_spectrum": - results[obs_index, column_index] = self.get_schmidt_spectrum(observable.sites) + results[obs_index, column_index] = self.get_schmidt_spectrum( + observable.sites + ) elif observable.gate.name == "pvm": - assert hasattr(observable.gate, "bitstring"), "Gate does not have attribute bitstring." - results[obs_index, column_index] = self.project_onto_bitstring(observable.gate.bitstring) + assert hasattr( + observable.gate, "bitstring" + ), "Gate does not have attribute bitstring." + results[obs_index, column_index] = self.project_onto_bitstring( + observable.gate.bitstring + ) else: - idx = observable.sites[0] if isinstance(observable.sites, list) else observable.sites + idx = ( + observable.sites[0] + if isinstance(observable.sites, list) + else observable.sites + ) if idx > last_site: for site in range(last_site, idx): temp_state.shift_orthogonality_center_right(site) @@ -732,16 +794,24 @@ def expect(self, observable: Observable) -> np.float64: elif isinstance(observable.sites, list): sites_list = observable.sites - assert sites_list is not None, f"Invalid type in expect {type(observable.sites).__name__}" + assert ( + sites_list is not None + ), f"Invalid type in expect {type(observable.sites).__name__}" - assert len(sites_list) < 3, "Only one- and two-site observables are currently implemented." + assert ( + len(sites_list) < 3 + ), "Only one- and two-site observables are currently implemented." for s in sites_list: - assert s in range(self.length), f"Observable acting on non-existing site: {s}" + assert s in range( + self.length + ), f"Observable acting on non-existing site: {s}" exp = self.local_expect(observable, sites_list) - assert exp.imag < 1e-13, f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." + assert ( + exp.imag < 1e-13 + ), f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." 
return exp.real def measure_single_shot(self) -> int: @@ -758,7 +828,9 @@ def measure_single_shot(self) -> int: temp_state = copy.deepcopy(self) bitstring = [] for site, tensor in enumerate(temp_state.tensors): - reduced_density_matrix = oe.contract("abc, dbc->ad", tensor, np.conj(tensor)) + reduced_density_matrix = oe.contract( + "abc, dbc->ad", tensor, np.conj(tensor) + ) probabilities = np.diag(reduced_density_matrix).real rng = np.random.default_rng() chosen_index = rng.choice(len(probabilities), p=probabilities) @@ -772,7 +844,9 @@ def measure_single_shot(self) -> int: temp_state.tensors[site + 1] = ( # noqa: B909 1 / np.sqrt(probabilities[chosen_index]) - * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) + * oe.contract( + "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] + ) ) return sum(c << i for i, c in enumerate(bitstring)) @@ -797,10 +871,14 @@ def measure_shots(self, shots: int) -> dict[int, int]: if shots > 1: max_workers = max(1, multiprocessing.cpu_count() - 1) with ( - concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor, + concurrent.futures.ProcessPoolExecutor( + max_workers=max_workers + ) as executor, tqdm(total=shots, desc="Measuring shots", ncols=80) as pbar, ): - futures = [executor.submit(self.measure_single_shot) for _ in range(shots)] + futures = [ + executor.submit(self.measure_single_shot) for _ in range(shots) + ] for future in concurrent.futures.as_completed(futures): result = future.result() results[result] = results.get(result, 0) + 1 @@ -824,7 +902,9 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: Returns: float: Probability of obtaining the given bitstring under projective measurement. """ - assert len(bitstring) == self.length, "Bitstring length must match number of sites" + assert ( + len(bitstring) == self.length + ), "Bitstring length must match number of sites" temp_state = copy.deepcopy(self) total_norm = 1.0 @@ -832,7 +912,9 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: state_index = int(char) tensor = temp_state.tensors[site] local_dim = self.physical_dimensions[site] - assert 0 <= state_index < local_dim, f"Invalid state index {state_index} at site {site}" + assert ( + 0 <= state_index < local_dim + ), f"Invalid state index {state_index} at site {site}" selected_state = np.zeros(local_dim) selected_state[state_index] = 1 @@ -849,7 +931,11 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: # Normalize and propagate if site != self.length - 1: temp_state.tensors[site + 1] = ( - 1 / norm * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) + 1 + / norm + * oe.contract( + "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] + ) ) return np.complex128(total_norm**2) @@ -1366,13 +1452,20 @@ def bose_hubbard( tensor[3, 3] = id_boson # build the full tensor list - tensors = [np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) for _ in range(length)] + tensors = [ + np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) + for _ in range(length) + ] # Left boundary: take only row 0 - tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype(np.complex128) + tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype( + np.complex128 + ) # Right boundary: take only col 3 - tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype(np.complex128) + tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype( 
+ np.complex128 + ) mpo = cls() mpo.tensors = tensors @@ -1430,7 +1523,9 @@ def finite_state_machine( self.length = len(self.tensors) self.physical_dimension = self.tensors[0].shape[0] - def custom(self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True) -> None: + def custom( + self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True + ) -> None: """Custom MPO from tensors. Initialize the custom MPO (Matrix Product Operator) with the given tensors. @@ -1498,7 +1593,9 @@ def from_pauli_sum( self.physical_dimension = physical_dimension if not terms: - self.tensors = [np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length)] + self.tensors = [ + np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length) + ] return # 1. Parse terms into dense lists of operator names. @@ -1532,7 +1629,9 @@ def from_pauli_sum( term_trajectories[t_idx][length] = 0 # bond_state_maps[i] stores the mapping: (Op_str, Next_State_ID) -> Current_State_ID - bond_state_maps: list[dict[tuple[str, int], int]] = [{} for _ in range(length + 1)] + bond_state_maps: list[dict[tuple[str, int], int]] = [ + {} for _ in range(length + 1) + ] # Sweep Right-to-Left (sites L-1 down to 1) to build the FSM transitions. # We stop at bond 1. Bond 0 is always the single "Start" state. @@ -1598,7 +1697,9 @@ def from_pauli_sum( # The FSM construction is optimal for one-sided (suffix) uniqueness. # A standard two-sweep compression ("lr_rl") puts the MPO in canonical form # and removes any remaining redundancies (e.g., common prefixes). - self.compress(tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl") + self.compress( + tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl" + ) assert self.check_if_valid_mpo(), "MPO initialized wrong" def compress( @@ -1647,9 +1748,13 @@ def compress( for _ in range(n_sweeps): for direction in schedule: - self._compress_one_sweep(direction=direction, tol=tol, max_bond_dim=max_bond_dim) + self._compress_one_sweep( + direction=direction, tol=tol, max_bond_dim=max_bond_dim + ) - def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | None) -> None: + def _compress_one_sweep( + self, *, direction: str, tol: float, max_bond_dim: int | None + ) -> None: """Run one in-place MPO SVD compression sweep in the given direction. 
Args: @@ -1699,7 +1804,9 @@ def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | vh = vh[:keep, :] # Left tensor: (bond_dim_left, d, d, keep) -> (d, d, bond_dim_left, keep) - left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose(1, 2, 0, 3) + left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose( + 1, 2, 0, 3 + ) # Right tensor: (keep, d, d, bond_dim_right) -> (d, d, keep, bond_dim_right) svh = (s[:, None] * vh).reshape(keep, phys_dim, phys_dim, bond_dim_right) From 91f05d86aed14931d6bca75486bacea2bd62c1c6 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 13 Feb 2026 16:21:30 +0000 Subject: [PATCH 09/25] =?UTF-8?q?=F0=9F=8E=A8=20pre-commit=20fixes?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/mqt/yaqs/core/data_structures/networks.py | 215 +++++------------- 1 file changed, 54 insertions(+), 161 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 9a9f992a..428f3e51 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -174,9 +174,7 @@ def __init__( vector[0] = rng.random() vector[1] = 1 - vector[0] elif state == "basis": - assert ( - basis_string is not None - ), "basis_string must be provided for 'basis' state initialization." + assert basis_string is not None, "basis_string must be provided for 'basis' state initialization." self.init_mps_from_basis(basis_string, self.physical_dimensions) break else: @@ -193,9 +191,7 @@ def __init__( if pad is not None: self.pad_bond_dimension(pad) - def init_mps_from_basis( - self, basis_string: str, physical_dimensions: list[int] - ) -> None: + def init_mps_from_basis(self, basis_string: str, physical_dimensions: list[int]) -> None: """Initialize a list of MPS tensors representing a product state from a basis string. Args: @@ -354,12 +350,8 @@ def get_schmidt_spectrum(self, sites: list[int]) -> NDArray[np.float64]: NDArray[np.float64]: The Schmidt spectrum (length 500), with unused entries filled with NaN. """ - assert ( - len(sites) == 2 - ), "Schmidt spectrum is defined on a bond (two adjacent sites)." - assert ( - sites[0] + 1 == sites[1] - ), "Schmidt spectrum only defined for nearest-neighbor cut." + assert len(sites) == 2, "Schmidt spectrum is defined on a bond (two adjacent sites)." + assert sites[0] + 1 == sites[1], "Schmidt spectrum only defined for nearest-neighbor cut." top_schmidt_vals = 500 i, j = sites a, b = self.tensors[i], self.tensors[j] @@ -415,9 +407,7 @@ def almost_equal(self, other: MPS) -> bool: return False return True - def shift_orthogonality_center_right( - self, current_orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def shift_orthogonality_center_right(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: """Shifts orthogonality center right. This function performs a QR decomposition to shift the known current center to the right and move @@ -451,9 +441,7 @@ def shift_orthogonality_center_right( self.tensors[current_orthogonality_center + 1], ) = (a_new, b_new) - def shift_orthogonality_center_left( - self, current_orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def shift_orthogonality_center_left(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: """Shifts orthogonality center left. 
This function flips the network, performs a right shift, then flips the network again. @@ -464,14 +452,10 @@ def shift_orthogonality_center_left( Default is QR. """ self.flip_network() - self.shift_orthogonality_center_right( - self.length - current_orthogonality_center - 1, decomposition - ) + self.shift_orthogonality_center_right(self.length - current_orthogonality_center - 1, decomposition) self.flip_network() - def set_canonical_form( - self, orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR") -> None: """Sets canonical form of MPS. Left and right normalizes an MPS around a selected site. @@ -482,9 +466,7 @@ def set_canonical_form( decomposition: Type of decomposition. Default QR. """ - def sweep_decomposition( - orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def sweep_decomposition(orthogonality_center: int, decomposition: str = "QR") -> None: for site, _ in enumerate(self.tensors): if site == orthogonality_center: break @@ -515,17 +497,13 @@ def normalize(self, form: str = "B", decomposition: str = "QR") -> None: if form == "B": self.flip_network() - self.set_canonical_form( - orthogonality_center=self.length - 1, decomposition=decomposition - ) + self.set_canonical_form(orthogonality_center=self.length - 1, decomposition=decomposition) self.shift_orthogonality_center_right(self.length - 1, decomposition) if form == "B": self.flip_network() - def truncate( - self, threshold: float = 1e-12, max_bond_dim: int | None = None - ) -> None: + def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> None: """In-place MPS truncation via repeated two-site SVDs.""" orth_center = self.check_canonical_form()[0] if self.length == 1: @@ -547,9 +525,7 @@ def truncate( self.flip_network() - def scalar_product( - self, other: MPS, sites: int | list[int] | None = None - ) -> np.complex128: + def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np.complex128: """Compute the scalar (inner) product between two Matrix Product States (MPS). The function contracts the corresponding tensors of two MPS objects. If no specific site is @@ -576,12 +552,8 @@ def scalar_product( result = None for idx in range(self.length): # contract at each site into a 4-leg tensor - theta = oe.contract( - "abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx] - ) - result = ( - theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) - ) + theta = oe.contract("abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx]) + result = theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) # squeeze down to scalar assert result is not None return np.complex128(np.squeeze(result)) @@ -613,9 +585,7 @@ def scalar_product( msg = f"Invalid `sites` argument: {sites!r}" raise ValueError(msg) - def local_expect( - self, operator: Observable, sites: int | list[int] - ) -> np.complex128: + def local_expect(self, operator: Observable, sites: int | list[int]) -> np.complex128: """Compute the local expectation value of an operator on an MPS. 
The function applies the given operator to the tensor at the specified site of a deep copy of the @@ -641,17 +611,11 @@ def local_expect( i = sites if isinstance(operator.sites, list): - assert ( - operator.sites[0] == i - ), f"Operator sites mismatch {operator.sites[0]}, {i}" + assert operator.sites[0] == i, f"Operator sites mismatch {operator.sites[0]}, {i}" elif isinstance(operator.sites, int): - assert ( - operator.sites == i - ), f"Operator sites mismatch {operator.sites}, {i}" + assert operator.sites == i, f"Operator sites mismatch {operator.sites}, {i}" - assert ( - i is not None - ), f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" + assert i is not None, f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" a = temp_state.tensors[i] temp_state.tensors[i] = oe.contract("ab, bcd->acd", operator.gate.matrix, a) @@ -662,12 +626,10 @@ def local_expect( assert operator.sites[0] == i, "Observable sites mismatch" assert operator.sites[1] == j, "Observable sites mismatch" - assert ( - operator.sites[0] < operator.sites[1] - ), "Observable sites must be in ascending order." - assert ( - operator.sites[1] - operator.sites[0] == 1 - ), "Only nearest-neighbor observables are currently implemented." + assert operator.sites[0] < operator.sites[1], "Observable sites must be in ascending order." + assert operator.sites[1] - operator.sites[0] == 1, ( + "Only nearest-neighbor observables are currently implemented." + ) a = temp_state.tensors[i] b = temp_state.tensors[j] d_i, left, _ = a.shape @@ -679,9 +641,7 @@ def local_expect( theta = theta.reshape(left, d_i * d_j, right) # (l, d_i*d_j, r) # 2) apply operator on the combined phys index - theta = oe.contract( - "ab, cbd->cad", operator.gate.matrix, theta - ) # (l, d_i*d_j, r) + theta = oe.contract("ab, cbd->cad", operator.gate.matrix, theta) # (l, d_i*d_j, r) theta = theta.reshape(left, d_i, d_j, right) # back to (l, d_i, d_j, r) # 3) split via SVD @@ -694,9 +654,7 @@ def local_expect( u_tensor = u_mat.reshape(left, d_i, chi_new) # (l, d_i, r_new) a_new = u_tensor.transpose(1, 0, 2) # → (d_i, l, r_new) - v_tensor = (np.diag(s_vec) @ v_mat).reshape( - chi_new, d_j, right - ) # (l_new, d_j, r) + v_tensor = (np.diag(s_vec) @ v_mat).reshape(chi_new, d_j, right) # (l_new, d_j, r) b_new = v_tensor.transpose(1, 0, 2) # → (d_j, l_new, r) temp_state.tensors[i] = a_new @@ -732,44 +690,24 @@ def evaluate_observables( elif observable.gate.name == "total_bond": results[obs_index, column_index] = self.get_total_bond() elif observable.gate.name in {"entropy", "schmidt_spectrum"}: - assert isinstance( - observable.sites, list - ), "Given metric requires a list of sites" - assert ( - len(observable.sites) == 2 - ), "Given metric requires 2 sites to act on." + assert isinstance(observable.sites, list), "Given metric requires a list of sites" + assert len(observable.sites) == 2, "Given metric requires 2 sites to act on." max_site = max(observable.sites) min_site = min(observable.sites) - assert ( - max_site - min_site == 1 - ), "Entropy and Schmidt cuts must be nearest neighbor." + assert max_site - min_site == 1, "Entropy and Schmidt cuts must be nearest neighbor." 
for s in observable.sites: - assert s in range( - self.length - ), f"Observable acting on non-existing site: {s}" + assert s in range(self.length), f"Observable acting on non-existing site: {s}" if observable.gate.name == "entropy": - results[obs_index, column_index] = self.get_entropy( - observable.sites - ) + results[obs_index, column_index] = self.get_entropy(observable.sites) elif observable.gate.name == "schmidt_spectrum": - results[obs_index, column_index] = self.get_schmidt_spectrum( - observable.sites - ) + results[obs_index, column_index] = self.get_schmidt_spectrum(observable.sites) elif observable.gate.name == "pvm": - assert hasattr( - observable.gate, "bitstring" - ), "Gate does not have attribute bitstring." - results[obs_index, column_index] = self.project_onto_bitstring( - observable.gate.bitstring - ) + assert hasattr(observable.gate, "bitstring"), "Gate does not have attribute bitstring." + results[obs_index, column_index] = self.project_onto_bitstring(observable.gate.bitstring) else: - idx = ( - observable.sites[0] - if isinstance(observable.sites, list) - else observable.sites - ) + idx = observable.sites[0] if isinstance(observable.sites, list) else observable.sites if idx > last_site: for site in range(last_site, idx): temp_state.shift_orthogonality_center_right(site) @@ -794,24 +732,16 @@ def expect(self, observable: Observable) -> np.float64: elif isinstance(observable.sites, list): sites_list = observable.sites - assert ( - sites_list is not None - ), f"Invalid type in expect {type(observable.sites).__name__}" + assert sites_list is not None, f"Invalid type in expect {type(observable.sites).__name__}" - assert ( - len(sites_list) < 3 - ), "Only one- and two-site observables are currently implemented." + assert len(sites_list) < 3, "Only one- and two-site observables are currently implemented." for s in sites_list: - assert s in range( - self.length - ), f"Observable acting on non-existing site: {s}" + assert s in range(self.length), f"Observable acting on non-existing site: {s}" exp = self.local_expect(observable, sites_list) - assert ( - exp.imag < 1e-13 - ), f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." + assert exp.imag < 1e-13, f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." 
return exp.real def measure_single_shot(self) -> int: @@ -828,9 +758,7 @@ def measure_single_shot(self) -> int: temp_state = copy.deepcopy(self) bitstring = [] for site, tensor in enumerate(temp_state.tensors): - reduced_density_matrix = oe.contract( - "abc, dbc->ad", tensor, np.conj(tensor) - ) + reduced_density_matrix = oe.contract("abc, dbc->ad", tensor, np.conj(tensor)) probabilities = np.diag(reduced_density_matrix).real rng = np.random.default_rng() chosen_index = rng.choice(len(probabilities), p=probabilities) @@ -844,9 +772,7 @@ def measure_single_shot(self) -> int: temp_state.tensors[site + 1] = ( # noqa: B909 1 / np.sqrt(probabilities[chosen_index]) - * oe.contract( - "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] - ) + * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) ) return sum(c << i for i, c in enumerate(bitstring)) @@ -871,14 +797,10 @@ def measure_shots(self, shots: int) -> dict[int, int]: if shots > 1: max_workers = max(1, multiprocessing.cpu_count() - 1) with ( - concurrent.futures.ProcessPoolExecutor( - max_workers=max_workers - ) as executor, + concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor, tqdm(total=shots, desc="Measuring shots", ncols=80) as pbar, ): - futures = [ - executor.submit(self.measure_single_shot) for _ in range(shots) - ] + futures = [executor.submit(self.measure_single_shot) for _ in range(shots)] for future in concurrent.futures.as_completed(futures): result = future.result() results[result] = results.get(result, 0) + 1 @@ -902,9 +824,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: Returns: float: Probability of obtaining the given bitstring under projective measurement. """ - assert ( - len(bitstring) == self.length - ), "Bitstring length must match number of sites" + assert len(bitstring) == self.length, "Bitstring length must match number of sites" temp_state = copy.deepcopy(self) total_norm = 1.0 @@ -912,9 +832,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: state_index = int(char) tensor = temp_state.tensors[site] local_dim = self.physical_dimensions[site] - assert ( - 0 <= state_index < local_dim - ), f"Invalid state index {state_index} at site {site}" + assert 0 <= state_index < local_dim, f"Invalid state index {state_index} at site {site}" selected_state = np.zeros(local_dim) selected_state[state_index] = 1 @@ -931,11 +849,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: # Normalize and propagate if site != self.length - 1: temp_state.tensors[site + 1] = ( - 1 - / norm - * oe.contract( - "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] - ) + 1 / norm * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) ) return np.complex128(total_norm**2) @@ -1452,20 +1366,13 @@ def bose_hubbard( tensor[3, 3] = id_boson # build the full tensor list - tensors = [ - np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) - for _ in range(length) - ] + tensors = [np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) for _ in range(length)] # Left boundary: take only row 0 - tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype( - np.complex128 - ) + tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype(np.complex128) # Right boundary: take only col 3 - tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype( - np.complex128 - ) + tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 
3:4].astype(np.complex128) mpo = cls() mpo.tensors = tensors @@ -1523,9 +1430,7 @@ def finite_state_machine( self.length = len(self.tensors) self.physical_dimension = self.tensors[0].shape[0] - def custom( - self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True - ) -> None: + def custom(self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True) -> None: """Custom MPO from tensors. Initialize the custom MPO (Matrix Product Operator) with the given tensors. @@ -1593,9 +1498,7 @@ def from_pauli_sum( self.physical_dimension = physical_dimension if not terms: - self.tensors = [ - np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length) - ] + self.tensors = [np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length)] return # 1. Parse terms into dense lists of operator names. @@ -1629,9 +1532,7 @@ def from_pauli_sum( term_trajectories[t_idx][length] = 0 # bond_state_maps[i] stores the mapping: (Op_str, Next_State_ID) -> Current_State_ID - bond_state_maps: list[dict[tuple[str, int], int]] = [ - {} for _ in range(length + 1) - ] + bond_state_maps: list[dict[tuple[str, int], int]] = [{} for _ in range(length + 1)] # Sweep Right-to-Left (sites L-1 down to 1) to build the FSM transitions. # We stop at bond 1. Bond 0 is always the single "Start" state. @@ -1697,9 +1598,7 @@ def from_pauli_sum( # The FSM construction is optimal for one-sided (suffix) uniqueness. # A standard two-sweep compression ("lr_rl") puts the MPO in canonical form # and removes any remaining redundancies (e.g., common prefixes). - self.compress( - tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl" - ) + self.compress(tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl") assert self.check_if_valid_mpo(), "MPO initialized wrong" def compress( @@ -1748,13 +1647,9 @@ def compress( for _ in range(n_sweeps): for direction in schedule: - self._compress_one_sweep( - direction=direction, tol=tol, max_bond_dim=max_bond_dim - ) + self._compress_one_sweep(direction=direction, tol=tol, max_bond_dim=max_bond_dim) - def _compress_one_sweep( - self, *, direction: str, tol: float, max_bond_dim: int | None - ) -> None: + def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | None) -> None: """Run one in-place MPO SVD compression sweep in the given direction. 
Args: @@ -1804,9 +1699,7 @@ def _compress_one_sweep( vh = vh[:keep, :] # Left tensor: (bond_dim_left, d, d, keep) -> (d, d, bond_dim_left, keep) - left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose( - 1, 2, 0, 3 - ) + left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose(1, 2, 0, 3) # Right tensor: (keep, d, d, bond_dim_right) -> (d, d, keep, bond_dim_right) svh = (s[:, None] * vh).reshape(keep, phys_dim, phys_dim, bond_dim_right) From 42c22f1ff1985d3a09b609ba13a641fdfdc6189c Mon Sep 17 00:00:00 2001 From: Lukas Date: Mon, 16 Feb 2026 11:52:54 +0100 Subject: [PATCH 10/25] added boundary cases to MPO_fom_matrix() + docstring update --- src/mqt/yaqs/core/data_structures/networks.py | 289 +++++++++++++----- 1 file changed, 212 insertions(+), 77 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 428f3e51..f2fca552 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -174,7 +174,9 @@ def __init__( vector[0] = rng.random() vector[1] = 1 - vector[0] elif state == "basis": - assert basis_string is not None, "basis_string must be provided for 'basis' state initialization." + assert ( + basis_string is not None + ), "basis_string must be provided for 'basis' state initialization." self.init_mps_from_basis(basis_string, self.physical_dimensions) break else: @@ -191,7 +193,9 @@ def __init__( if pad is not None: self.pad_bond_dimension(pad) - def init_mps_from_basis(self, basis_string: str, physical_dimensions: list[int]) -> None: + def init_mps_from_basis( + self, basis_string: str, physical_dimensions: list[int] + ) -> None: """Initialize a list of MPS tensors representing a product state from a basis string. Args: @@ -350,8 +354,12 @@ def get_schmidt_spectrum(self, sites: list[int]) -> NDArray[np.float64]: NDArray[np.float64]: The Schmidt spectrum (length 500), with unused entries filled with NaN. """ - assert len(sites) == 2, "Schmidt spectrum is defined on a bond (two adjacent sites)." - assert sites[0] + 1 == sites[1], "Schmidt spectrum only defined for nearest-neighbor cut." + assert ( + len(sites) == 2 + ), "Schmidt spectrum is defined on a bond (two adjacent sites)." + assert ( + sites[0] + 1 == sites[1] + ), "Schmidt spectrum only defined for nearest-neighbor cut." top_schmidt_vals = 500 i, j = sites a, b = self.tensors[i], self.tensors[j] @@ -407,7 +415,9 @@ def almost_equal(self, other: MPS) -> bool: return False return True - def shift_orthogonality_center_right(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: + def shift_orthogonality_center_right( + self, current_orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Shifts orthogonality center right. This function performs a QR decomposition to shift the known current center to the right and move @@ -441,7 +451,9 @@ def shift_orthogonality_center_right(self, current_orthogonality_center: int, de self.tensors[current_orthogonality_center + 1], ) = (a_new, b_new) - def shift_orthogonality_center_left(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: + def shift_orthogonality_center_left( + self, current_orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Shifts orthogonality center left. This function flips the network, performs a right shift, then flips the network again. @@ -452,10 +464,14 @@ def shift_orthogonality_center_left(self, current_orthogonality_center: int, dec Default is QR. 
""" self.flip_network() - self.shift_orthogonality_center_right(self.length - current_orthogonality_center - 1, decomposition) + self.shift_orthogonality_center_right( + self.length - current_orthogonality_center - 1, decomposition + ) self.flip_network() - def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR") -> None: + def set_canonical_form( + self, orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Sets canonical form of MPS. Left and right normalizes an MPS around a selected site. @@ -466,7 +482,9 @@ def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR decomposition: Type of decomposition. Default QR. """ - def sweep_decomposition(orthogonality_center: int, decomposition: str = "QR") -> None: + def sweep_decomposition( + orthogonality_center: int, decomposition: str = "QR" + ) -> None: for site, _ in enumerate(self.tensors): if site == orthogonality_center: break @@ -497,13 +515,17 @@ def normalize(self, form: str = "B", decomposition: str = "QR") -> None: if form == "B": self.flip_network() - self.set_canonical_form(orthogonality_center=self.length - 1, decomposition=decomposition) + self.set_canonical_form( + orthogonality_center=self.length - 1, decomposition=decomposition + ) self.shift_orthogonality_center_right(self.length - 1, decomposition) if form == "B": self.flip_network() - def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> None: + def truncate( + self, threshold: float = 1e-12, max_bond_dim: int | None = None + ) -> None: """In-place MPS truncation via repeated two-site SVDs.""" orth_center = self.check_canonical_form()[0] if self.length == 1: @@ -525,7 +547,9 @@ def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> self.flip_network() - def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np.complex128: + def scalar_product( + self, other: MPS, sites: int | list[int] | None = None + ) -> np.complex128: """Compute the scalar (inner) product between two Matrix Product States (MPS). The function contracts the corresponding tensors of two MPS objects. If no specific site is @@ -552,8 +576,12 @@ def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np result = None for idx in range(self.length): # contract at each site into a 4-leg tensor - theta = oe.contract("abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx]) - result = theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) + theta = oe.contract( + "abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx] + ) + result = ( + theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) + ) # squeeze down to scalar assert result is not None return np.complex128(np.squeeze(result)) @@ -585,7 +613,9 @@ def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np msg = f"Invalid `sites` argument: {sites!r}" raise ValueError(msg) - def local_expect(self, operator: Observable, sites: int | list[int]) -> np.complex128: + def local_expect( + self, operator: Observable, sites: int | list[int] + ) -> np.complex128: """Compute the local expectation value of an operator on an MPS. 
The function applies the given operator to the tensor at the specified site of a deep copy of the @@ -611,11 +641,17 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl i = sites if isinstance(operator.sites, list): - assert operator.sites[0] == i, f"Operator sites mismatch {operator.sites[0]}, {i}" + assert ( + operator.sites[0] == i + ), f"Operator sites mismatch {operator.sites[0]}, {i}" elif isinstance(operator.sites, int): - assert operator.sites == i, f"Operator sites mismatch {operator.sites}, {i}" + assert ( + operator.sites == i + ), f"Operator sites mismatch {operator.sites}, {i}" - assert i is not None, f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" + assert ( + i is not None + ), f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" a = temp_state.tensors[i] temp_state.tensors[i] = oe.contract("ab, bcd->acd", operator.gate.matrix, a) @@ -626,10 +662,12 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl assert operator.sites[0] == i, "Observable sites mismatch" assert operator.sites[1] == j, "Observable sites mismatch" - assert operator.sites[0] < operator.sites[1], "Observable sites must be in ascending order." - assert operator.sites[1] - operator.sites[0] == 1, ( - "Only nearest-neighbor observables are currently implemented." - ) + assert ( + operator.sites[0] < operator.sites[1] + ), "Observable sites must be in ascending order." + assert ( + operator.sites[1] - operator.sites[0] == 1 + ), "Only nearest-neighbor observables are currently implemented." a = temp_state.tensors[i] b = temp_state.tensors[j] d_i, left, _ = a.shape @@ -641,7 +679,9 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl theta = theta.reshape(left, d_i * d_j, right) # (l, d_i*d_j, r) # 2) apply operator on the combined phys index - theta = oe.contract("ab, cbd->cad", operator.gate.matrix, theta) # (l, d_i*d_j, r) + theta = oe.contract( + "ab, cbd->cad", operator.gate.matrix, theta + ) # (l, d_i*d_j, r) theta = theta.reshape(left, d_i, d_j, right) # back to (l, d_i, d_j, r) # 3) split via SVD @@ -654,7 +694,9 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl u_tensor = u_mat.reshape(left, d_i, chi_new) # (l, d_i, r_new) a_new = u_tensor.transpose(1, 0, 2) # → (d_i, l, r_new) - v_tensor = (np.diag(s_vec) @ v_mat).reshape(chi_new, d_j, right) # (l_new, d_j, r) + v_tensor = (np.diag(s_vec) @ v_mat).reshape( + chi_new, d_j, right + ) # (l_new, d_j, r) b_new = v_tensor.transpose(1, 0, 2) # → (d_j, l_new, r) temp_state.tensors[i] = a_new @@ -690,24 +732,44 @@ def evaluate_observables( elif observable.gate.name == "total_bond": results[obs_index, column_index] = self.get_total_bond() elif observable.gate.name in {"entropy", "schmidt_spectrum"}: - assert isinstance(observable.sites, list), "Given metric requires a list of sites" - assert len(observable.sites) == 2, "Given metric requires 2 sites to act on." + assert isinstance( + observable.sites, list + ), "Given metric requires a list of sites" + assert ( + len(observable.sites) == 2 + ), "Given metric requires 2 sites to act on." max_site = max(observable.sites) min_site = min(observable.sites) - assert max_site - min_site == 1, "Entropy and Schmidt cuts must be nearest neighbor." + assert ( + max_site - min_site == 1 + ), "Entropy and Schmidt cuts must be nearest neighbor." 
for s in observable.sites: - assert s in range(self.length), f"Observable acting on non-existing site: {s}" + assert s in range( + self.length + ), f"Observable acting on non-existing site: {s}" if observable.gate.name == "entropy": - results[obs_index, column_index] = self.get_entropy(observable.sites) + results[obs_index, column_index] = self.get_entropy( + observable.sites + ) elif observable.gate.name == "schmidt_spectrum": - results[obs_index, column_index] = self.get_schmidt_spectrum(observable.sites) + results[obs_index, column_index] = self.get_schmidt_spectrum( + observable.sites + ) elif observable.gate.name == "pvm": - assert hasattr(observable.gate, "bitstring"), "Gate does not have attribute bitstring." - results[obs_index, column_index] = self.project_onto_bitstring(observable.gate.bitstring) + assert hasattr( + observable.gate, "bitstring" + ), "Gate does not have attribute bitstring." + results[obs_index, column_index] = self.project_onto_bitstring( + observable.gate.bitstring + ) else: - idx = observable.sites[0] if isinstance(observable.sites, list) else observable.sites + idx = ( + observable.sites[0] + if isinstance(observable.sites, list) + else observable.sites + ) if idx > last_site: for site in range(last_site, idx): temp_state.shift_orthogonality_center_right(site) @@ -732,16 +794,24 @@ def expect(self, observable: Observable) -> np.float64: elif isinstance(observable.sites, list): sites_list = observable.sites - assert sites_list is not None, f"Invalid type in expect {type(observable.sites).__name__}" + assert ( + sites_list is not None + ), f"Invalid type in expect {type(observable.sites).__name__}" - assert len(sites_list) < 3, "Only one- and two-site observables are currently implemented." + assert ( + len(sites_list) < 3 + ), "Only one- and two-site observables are currently implemented." for s in sites_list: - assert s in range(self.length), f"Observable acting on non-existing site: {s}" + assert s in range( + self.length + ), f"Observable acting on non-existing site: {s}" exp = self.local_expect(observable, sites_list) - assert exp.imag < 1e-13, f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." + assert ( + exp.imag < 1e-13 + ), f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." 
return exp.real def measure_single_shot(self) -> int: @@ -758,7 +828,9 @@ def measure_single_shot(self) -> int: temp_state = copy.deepcopy(self) bitstring = [] for site, tensor in enumerate(temp_state.tensors): - reduced_density_matrix = oe.contract("abc, dbc->ad", tensor, np.conj(tensor)) + reduced_density_matrix = oe.contract( + "abc, dbc->ad", tensor, np.conj(tensor) + ) probabilities = np.diag(reduced_density_matrix).real rng = np.random.default_rng() chosen_index = rng.choice(len(probabilities), p=probabilities) @@ -772,7 +844,9 @@ def measure_single_shot(self) -> int: temp_state.tensors[site + 1] = ( # noqa: B909 1 / np.sqrt(probabilities[chosen_index]) - * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) + * oe.contract( + "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] + ) ) return sum(c << i for i, c in enumerate(bitstring)) @@ -797,10 +871,14 @@ def measure_shots(self, shots: int) -> dict[int, int]: if shots > 1: max_workers = max(1, multiprocessing.cpu_count() - 1) with ( - concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor, + concurrent.futures.ProcessPoolExecutor( + max_workers=max_workers + ) as executor, tqdm(total=shots, desc="Measuring shots", ncols=80) as pbar, ): - futures = [executor.submit(self.measure_single_shot) for _ in range(shots)] + futures = [ + executor.submit(self.measure_single_shot) for _ in range(shots) + ] for future in concurrent.futures.as_completed(futures): result = future.result() results[result] = results.get(result, 0) + 1 @@ -824,7 +902,9 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: Returns: float: Probability of obtaining the given bitstring under projective measurement. """ - assert len(bitstring) == self.length, "Bitstring length must match number of sites" + assert ( + len(bitstring) == self.length + ), "Bitstring length must match number of sites" temp_state = copy.deepcopy(self) total_norm = 1.0 @@ -832,7 +912,9 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: state_index = int(char) tensor = temp_state.tensors[site] local_dim = self.physical_dimensions[site] - assert 0 <= state_index < local_dim, f"Invalid state index {state_index} at site {site}" + assert ( + 0 <= state_index < local_dim + ), f"Invalid state index {state_index} at site {site}" selected_state = np.zeros(local_dim) selected_state[state_index] = 1 @@ -849,7 +931,11 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: # Normalize and propagate if site != self.length - 1: temp_state.tensors[site + 1] = ( - 1 / norm * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) + 1 + / norm + * oe.contract( + "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] + ) ) return np.complex128(total_norm**2) @@ -1366,13 +1452,20 @@ def bose_hubbard( tensor[3, 3] = id_boson # build the full tensor list - tensors = [np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) for _ in range(length)] + tensors = [ + np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) + for _ in range(length) + ] # Left boundary: take only row 0 - tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype(np.complex128) + tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype( + np.complex128 + ) # Right boundary: take only col 3 - tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype(np.complex128) + tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype( 
+ np.complex128 + ) mpo = cls() mpo.tensors = tensors @@ -1430,7 +1523,9 @@ def finite_state_machine( self.length = len(self.tensors) self.physical_dimension = self.tensors[0].shape[0] - def custom(self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True) -> None: + def custom( + self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True + ) -> None: """Custom MPO from tensors. Initialize the custom MPO (Matrix Product Operator) with the given tensors. @@ -1498,7 +1593,9 @@ def from_pauli_sum( self.physical_dimension = physical_dimension if not terms: - self.tensors = [np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length)] + self.tensors = [ + np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length) + ] return # 1. Parse terms into dense lists of operator names. @@ -1532,7 +1629,9 @@ def from_pauli_sum( term_trajectories[t_idx][length] = 0 # bond_state_maps[i] stores the mapping: (Op_str, Next_State_ID) -> Current_State_ID - bond_state_maps: list[dict[tuple[str, int], int]] = [{} for _ in range(length + 1)] + bond_state_maps: list[dict[tuple[str, int], int]] = [ + {} for _ in range(length + 1) + ] # Sweep Right-to-Left (sites L-1 down to 1) to build the FSM transitions. # We stop at bond 1. Bond 0 is always the single "Start" state. @@ -1598,7 +1697,9 @@ def from_pauli_sum( # The FSM construction is optimal for one-sided (suffix) uniqueness. # A standard two-sweep compression ("lr_rl") puts the MPO in canonical form # and removes any remaining redundancies (e.g., common prefixes). - self.compress(tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl") + self.compress( + tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl" + ) assert self.check_if_valid_mpo(), "MPO initialized wrong" def compress( @@ -1647,9 +1748,13 @@ def compress( for _ in range(n_sweeps): for direction in schedule: - self._compress_one_sweep(direction=direction, tol=tol, max_bond_dim=max_bond_dim) + self._compress_one_sweep( + direction=direction, tol=tol, max_bond_dim=max_bond_dim + ) - def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | None) -> None: + def _compress_one_sweep( + self, *, direction: str, tol: float, max_bond_dim: int | None + ) -> None: """Run one in-place MPO SVD compression sweep in the given direction. Args: @@ -1699,7 +1804,9 @@ def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | vh = vh[:keep, :] # Left tensor: (bond_dim_left, d, d, keep) -> (d, d, bond_dim_left, keep) - left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose(1, 2, 0, 3) + left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose( + 1, 2, 0, 3 + ) # Right tensor: (keep, d, d, bond_dim_right) -> (d, d, keep, bond_dim_right) svh = (s[:, None] * vh).reshape(keep, phys_dim, phys_dim, bond_dim_right) @@ -1783,36 +1890,66 @@ def from_matrix( max_bond: int | None = None, cutoff: float = 0.0, ) -> MPO: - """Factorize a dense matrix into an MPO with uniform local dimension d. + """Factorize a dense matrix into an MPO with uniform local dimension ``d``. - Each site has shape (d, d). The number of sites n is inferred from + Each site has local shape ``(d, d)``. + The number of sites ``n`` is inferred from the relation: mat.shape = (d**n, d**n) - Parameters - ---------- - mat : np.ndarray - Square matrix of shape (d**n, d**n). - d : int - Physical dimension per site. - max_bond : Optional[int] - Max bond dimension. - cutoff : float - Singular values <= cutoff are discarded. 
+ Args: + mat (np.ndarray): + Square matrix of shape ``(d**n, d**n)``. + d (int): + Physical dimension per site. Must satisfy ``d > 0``. + max_bond (int | None): + Maximum allowed bond dimension (before truncation). + cutoff (float): + Singular values ``<= cutoff`` are discarded. + + Returns: + MPO: + An MPO with ``n`` sites, uniform physical dimension ``d`` per site, + and bond dimensions determined by SVD truncation. + + Raises: + ValueError: + If ``d <= 0``; + If ``d == 1`` but the matrix is not ``1 × 1``; + If the matrix is not square; + If ``rows`` is not a power of ``d``; + If the inferred number of sites ``n < 1``. """ + + # --- validate d --- + if d <= 0: + raise ValueError(f"Physical dimension d must be > 0, got d={d}.") + rows, cols = mat.shape if rows != cols: - msg = "Matrix must be square for uniform MPO factorization." - raise ValueError(msg) + raise ValueError("Matrix must be square for uniform MPO factorization.") - # --- infer chain length --- - n_float = np.log(rows) / np.log(d) - n = round(n_float) + # Special case: d == 1 + if d == 1: + if rows != 1: + raise ValueError( + "For d == 1 the matrix must be 1×1 since 1**n = 1 for any n." + ) + n = 1 + else: + # --- infer chain length --- + n_float = np.log(rows) / np.log(d) + n = round(n_float) + + if n < 1: + raise ValueError( + f"Inferred chain length n={n} is invalid; " + f"matrix dimension {rows} too small for base d={d}." + ) - if not np.isclose(n_float, n): - msg = f"Matrix dimension {rows} is not a power of d={d}." - raise ValueError(msg) + if not np.isclose(n_float, n): + raise ValueError(f"Matrix dimension {rows} is not a power of d={d}.") mat = np.asarray(mat, dtype=np.complex128) @@ -1834,7 +1971,6 @@ def _truncate(s: np.ndarray) -> int: rest = d ** (n - k - 1) rem = rem.reshape(left_rank, d, rest, d, rest) - rem_perm = np.transpose(rem, (1, 3, 0, 2, 4)) X = rem_perm.reshape(d * d * left_rank, rest * rest) @@ -1854,7 +1990,6 @@ def _truncate(s: np.ndarray) -> int: # last site rem = rem.reshape(left_rank, d, d) - T_last = np.transpose(rem, (1, 2, 0)).reshape(d, d, left_rank, 1) tensors.append(T_last) @@ -1865,7 +2000,7 @@ def _truncate(s: np.ndarray) -> int: assert mpo.check_if_valid_mpo(), "MPO initialized wrong" - return mpo + return mpo def check_if_valid_mpo(self) -> bool: """MPO validity check. From 43364fba86f1a7096ef66d2619b9973a99e41903 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Feb 2026 10:53:15 +0000 Subject: [PATCH 11/25] =?UTF-8?q?=F0=9F=8E=A8=20pre-commit=20fixes?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/mqt/yaqs/core/data_structures/networks.py | 234 +++++------------- 1 file changed, 64 insertions(+), 170 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index f2fca552..0fa23fbf 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -174,9 +174,7 @@ def __init__( vector[0] = rng.random() vector[1] = 1 - vector[0] elif state == "basis": - assert ( - basis_string is not None - ), "basis_string must be provided for 'basis' state initialization." + assert basis_string is not None, "basis_string must be provided for 'basis' state initialization." 
self.init_mps_from_basis(basis_string, self.physical_dimensions) break else: @@ -193,9 +191,7 @@ def __init__( if pad is not None: self.pad_bond_dimension(pad) - def init_mps_from_basis( - self, basis_string: str, physical_dimensions: list[int] - ) -> None: + def init_mps_from_basis(self, basis_string: str, physical_dimensions: list[int]) -> None: """Initialize a list of MPS tensors representing a product state from a basis string. Args: @@ -354,12 +350,8 @@ def get_schmidt_spectrum(self, sites: list[int]) -> NDArray[np.float64]: NDArray[np.float64]: The Schmidt spectrum (length 500), with unused entries filled with NaN. """ - assert ( - len(sites) == 2 - ), "Schmidt spectrum is defined on a bond (two adjacent sites)." - assert ( - sites[0] + 1 == sites[1] - ), "Schmidt spectrum only defined for nearest-neighbor cut." + assert len(sites) == 2, "Schmidt spectrum is defined on a bond (two adjacent sites)." + assert sites[0] + 1 == sites[1], "Schmidt spectrum only defined for nearest-neighbor cut." top_schmidt_vals = 500 i, j = sites a, b = self.tensors[i], self.tensors[j] @@ -415,9 +407,7 @@ def almost_equal(self, other: MPS) -> bool: return False return True - def shift_orthogonality_center_right( - self, current_orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def shift_orthogonality_center_right(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: """Shifts orthogonality center right. This function performs a QR decomposition to shift the known current center to the right and move @@ -451,9 +441,7 @@ def shift_orthogonality_center_right( self.tensors[current_orthogonality_center + 1], ) = (a_new, b_new) - def shift_orthogonality_center_left( - self, current_orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def shift_orthogonality_center_left(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: """Shifts orthogonality center left. This function flips the network, performs a right shift, then flips the network again. @@ -464,14 +452,10 @@ def shift_orthogonality_center_left( Default is QR. """ self.flip_network() - self.shift_orthogonality_center_right( - self.length - current_orthogonality_center - 1, decomposition - ) + self.shift_orthogonality_center_right(self.length - current_orthogonality_center - 1, decomposition) self.flip_network() - def set_canonical_form( - self, orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR") -> None: """Sets canonical form of MPS. Left and right normalizes an MPS around a selected site. @@ -482,9 +466,7 @@ def set_canonical_form( decomposition: Type of decomposition. Default QR. 
""" - def sweep_decomposition( - orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def sweep_decomposition(orthogonality_center: int, decomposition: str = "QR") -> None: for site, _ in enumerate(self.tensors): if site == orthogonality_center: break @@ -515,17 +497,13 @@ def normalize(self, form: str = "B", decomposition: str = "QR") -> None: if form == "B": self.flip_network() - self.set_canonical_form( - orthogonality_center=self.length - 1, decomposition=decomposition - ) + self.set_canonical_form(orthogonality_center=self.length - 1, decomposition=decomposition) self.shift_orthogonality_center_right(self.length - 1, decomposition) if form == "B": self.flip_network() - def truncate( - self, threshold: float = 1e-12, max_bond_dim: int | None = None - ) -> None: + def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> None: """In-place MPS truncation via repeated two-site SVDs.""" orth_center = self.check_canonical_form()[0] if self.length == 1: @@ -547,9 +525,7 @@ def truncate( self.flip_network() - def scalar_product( - self, other: MPS, sites: int | list[int] | None = None - ) -> np.complex128: + def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np.complex128: """Compute the scalar (inner) product between two Matrix Product States (MPS). The function contracts the corresponding tensors of two MPS objects. If no specific site is @@ -576,12 +552,8 @@ def scalar_product( result = None for idx in range(self.length): # contract at each site into a 4-leg tensor - theta = oe.contract( - "abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx] - ) - result = ( - theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) - ) + theta = oe.contract("abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx]) + result = theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) # squeeze down to scalar assert result is not None return np.complex128(np.squeeze(result)) @@ -613,9 +585,7 @@ def scalar_product( msg = f"Invalid `sites` argument: {sites!r}" raise ValueError(msg) - def local_expect( - self, operator: Observable, sites: int | list[int] - ) -> np.complex128: + def local_expect(self, operator: Observable, sites: int | list[int]) -> np.complex128: """Compute the local expectation value of an operator on an MPS. The function applies the given operator to the tensor at the specified site of a deep copy of the @@ -641,17 +611,11 @@ def local_expect( i = sites if isinstance(operator.sites, list): - assert ( - operator.sites[0] == i - ), f"Operator sites mismatch {operator.sites[0]}, {i}" + assert operator.sites[0] == i, f"Operator sites mismatch {operator.sites[0]}, {i}" elif isinstance(operator.sites, int): - assert ( - operator.sites == i - ), f"Operator sites mismatch {operator.sites}, {i}" + assert operator.sites == i, f"Operator sites mismatch {operator.sites}, {i}" - assert ( - i is not None - ), f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" + assert i is not None, f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" a = temp_state.tensors[i] temp_state.tensors[i] = oe.contract("ab, bcd->acd", operator.gate.matrix, a) @@ -662,12 +626,10 @@ def local_expect( assert operator.sites[0] == i, "Observable sites mismatch" assert operator.sites[1] == j, "Observable sites mismatch" - assert ( - operator.sites[0] < operator.sites[1] - ), "Observable sites must be in ascending order." 
- assert ( - operator.sites[1] - operator.sites[0] == 1 - ), "Only nearest-neighbor observables are currently implemented." + assert operator.sites[0] < operator.sites[1], "Observable sites must be in ascending order." + assert operator.sites[1] - operator.sites[0] == 1, ( + "Only nearest-neighbor observables are currently implemented." + ) a = temp_state.tensors[i] b = temp_state.tensors[j] d_i, left, _ = a.shape @@ -679,9 +641,7 @@ def local_expect( theta = theta.reshape(left, d_i * d_j, right) # (l, d_i*d_j, r) # 2) apply operator on the combined phys index - theta = oe.contract( - "ab, cbd->cad", operator.gate.matrix, theta - ) # (l, d_i*d_j, r) + theta = oe.contract("ab, cbd->cad", operator.gate.matrix, theta) # (l, d_i*d_j, r) theta = theta.reshape(left, d_i, d_j, right) # back to (l, d_i, d_j, r) # 3) split via SVD @@ -694,9 +654,7 @@ def local_expect( u_tensor = u_mat.reshape(left, d_i, chi_new) # (l, d_i, r_new) a_new = u_tensor.transpose(1, 0, 2) # → (d_i, l, r_new) - v_tensor = (np.diag(s_vec) @ v_mat).reshape( - chi_new, d_j, right - ) # (l_new, d_j, r) + v_tensor = (np.diag(s_vec) @ v_mat).reshape(chi_new, d_j, right) # (l_new, d_j, r) b_new = v_tensor.transpose(1, 0, 2) # → (d_j, l_new, r) temp_state.tensors[i] = a_new @@ -732,44 +690,24 @@ def evaluate_observables( elif observable.gate.name == "total_bond": results[obs_index, column_index] = self.get_total_bond() elif observable.gate.name in {"entropy", "schmidt_spectrum"}: - assert isinstance( - observable.sites, list - ), "Given metric requires a list of sites" - assert ( - len(observable.sites) == 2 - ), "Given metric requires 2 sites to act on." + assert isinstance(observable.sites, list), "Given metric requires a list of sites" + assert len(observable.sites) == 2, "Given metric requires 2 sites to act on." max_site = max(observable.sites) min_site = min(observable.sites) - assert ( - max_site - min_site == 1 - ), "Entropy and Schmidt cuts must be nearest neighbor." + assert max_site - min_site == 1, "Entropy and Schmidt cuts must be nearest neighbor." for s in observable.sites: - assert s in range( - self.length - ), f"Observable acting on non-existing site: {s}" + assert s in range(self.length), f"Observable acting on non-existing site: {s}" if observable.gate.name == "entropy": - results[obs_index, column_index] = self.get_entropy( - observable.sites - ) + results[obs_index, column_index] = self.get_entropy(observable.sites) elif observable.gate.name == "schmidt_spectrum": - results[obs_index, column_index] = self.get_schmidt_spectrum( - observable.sites - ) + results[obs_index, column_index] = self.get_schmidt_spectrum(observable.sites) elif observable.gate.name == "pvm": - assert hasattr( - observable.gate, "bitstring" - ), "Gate does not have attribute bitstring." - results[obs_index, column_index] = self.project_onto_bitstring( - observable.gate.bitstring - ) + assert hasattr(observable.gate, "bitstring"), "Gate does not have attribute bitstring." 
+ results[obs_index, column_index] = self.project_onto_bitstring(observable.gate.bitstring) else: - idx = ( - observable.sites[0] - if isinstance(observable.sites, list) - else observable.sites - ) + idx = observable.sites[0] if isinstance(observable.sites, list) else observable.sites if idx > last_site: for site in range(last_site, idx): temp_state.shift_orthogonality_center_right(site) @@ -794,24 +732,16 @@ def expect(self, observable: Observable) -> np.float64: elif isinstance(observable.sites, list): sites_list = observable.sites - assert ( - sites_list is not None - ), f"Invalid type in expect {type(observable.sites).__name__}" + assert sites_list is not None, f"Invalid type in expect {type(observable.sites).__name__}" - assert ( - len(sites_list) < 3 - ), "Only one- and two-site observables are currently implemented." + assert len(sites_list) < 3, "Only one- and two-site observables are currently implemented." for s in sites_list: - assert s in range( - self.length - ), f"Observable acting on non-existing site: {s}" + assert s in range(self.length), f"Observable acting on non-existing site: {s}" exp = self.local_expect(observable, sites_list) - assert ( - exp.imag < 1e-13 - ), f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." + assert exp.imag < 1e-13, f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." return exp.real def measure_single_shot(self) -> int: @@ -828,9 +758,7 @@ def measure_single_shot(self) -> int: temp_state = copy.deepcopy(self) bitstring = [] for site, tensor in enumerate(temp_state.tensors): - reduced_density_matrix = oe.contract( - "abc, dbc->ad", tensor, np.conj(tensor) - ) + reduced_density_matrix = oe.contract("abc, dbc->ad", tensor, np.conj(tensor)) probabilities = np.diag(reduced_density_matrix).real rng = np.random.default_rng() chosen_index = rng.choice(len(probabilities), p=probabilities) @@ -844,9 +772,7 @@ def measure_single_shot(self) -> int: temp_state.tensors[site + 1] = ( # noqa: B909 1 / np.sqrt(probabilities[chosen_index]) - * oe.contract( - "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] - ) + * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) ) return sum(c << i for i, c in enumerate(bitstring)) @@ -871,14 +797,10 @@ def measure_shots(self, shots: int) -> dict[int, int]: if shots > 1: max_workers = max(1, multiprocessing.cpu_count() - 1) with ( - concurrent.futures.ProcessPoolExecutor( - max_workers=max_workers - ) as executor, + concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor, tqdm(total=shots, desc="Measuring shots", ncols=80) as pbar, ): - futures = [ - executor.submit(self.measure_single_shot) for _ in range(shots) - ] + futures = [executor.submit(self.measure_single_shot) for _ in range(shots)] for future in concurrent.futures.as_completed(futures): result = future.result() results[result] = results.get(result, 0) + 1 @@ -902,9 +824,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: Returns: float: Probability of obtaining the given bitstring under projective measurement. 
""" - assert ( - len(bitstring) == self.length - ), "Bitstring length must match number of sites" + assert len(bitstring) == self.length, "Bitstring length must match number of sites" temp_state = copy.deepcopy(self) total_norm = 1.0 @@ -912,9 +832,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: state_index = int(char) tensor = temp_state.tensors[site] local_dim = self.physical_dimensions[site] - assert ( - 0 <= state_index < local_dim - ), f"Invalid state index {state_index} at site {site}" + assert 0 <= state_index < local_dim, f"Invalid state index {state_index} at site {site}" selected_state = np.zeros(local_dim) selected_state[state_index] = 1 @@ -931,11 +849,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: # Normalize and propagate if site != self.length - 1: temp_state.tensors[site + 1] = ( - 1 - / norm - * oe.contract( - "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] - ) + 1 / norm * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) ) return np.complex128(total_norm**2) @@ -1452,20 +1366,13 @@ def bose_hubbard( tensor[3, 3] = id_boson # build the full tensor list - tensors = [ - np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) - for _ in range(length) - ] + tensors = [np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) for _ in range(length)] # Left boundary: take only row 0 - tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype( - np.complex128 - ) + tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype(np.complex128) # Right boundary: take only col 3 - tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype( - np.complex128 - ) + tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype(np.complex128) mpo = cls() mpo.tensors = tensors @@ -1523,9 +1430,7 @@ def finite_state_machine( self.length = len(self.tensors) self.physical_dimension = self.tensors[0].shape[0] - def custom( - self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True - ) -> None: + def custom(self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True) -> None: """Custom MPO from tensors. Initialize the custom MPO (Matrix Product Operator) with the given tensors. @@ -1593,9 +1498,7 @@ def from_pauli_sum( self.physical_dimension = physical_dimension if not terms: - self.tensors = [ - np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length) - ] + self.tensors = [np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length)] return # 1. Parse terms into dense lists of operator names. @@ -1629,9 +1532,7 @@ def from_pauli_sum( term_trajectories[t_idx][length] = 0 # bond_state_maps[i] stores the mapping: (Op_str, Next_State_ID) -> Current_State_ID - bond_state_maps: list[dict[tuple[str, int], int]] = [ - {} for _ in range(length + 1) - ] + bond_state_maps: list[dict[tuple[str, int], int]] = [{} for _ in range(length + 1)] # Sweep Right-to-Left (sites L-1 down to 1) to build the FSM transitions. # We stop at bond 1. Bond 0 is always the single "Start" state. @@ -1697,9 +1598,7 @@ def from_pauli_sum( # The FSM construction is optimal for one-sided (suffix) uniqueness. # A standard two-sweep compression ("lr_rl") puts the MPO in canonical form # and removes any remaining redundancies (e.g., common prefixes). 
- self.compress( - tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl" - ) + self.compress(tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl") assert self.check_if_valid_mpo(), "MPO initialized wrong" def compress( @@ -1748,13 +1647,9 @@ def compress( for _ in range(n_sweeps): for direction in schedule: - self._compress_one_sweep( - direction=direction, tol=tol, max_bond_dim=max_bond_dim - ) + self._compress_one_sweep(direction=direction, tol=tol, max_bond_dim=max_bond_dim) - def _compress_one_sweep( - self, *, direction: str, tol: float, max_bond_dim: int | None - ) -> None: + def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | None) -> None: """Run one in-place MPO SVD compression sweep in the given direction. Args: @@ -1804,9 +1699,7 @@ def _compress_one_sweep( vh = vh[:keep, :] # Left tensor: (bond_dim_left, d, d, keep) -> (d, d, bond_dim_left, keep) - left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose( - 1, 2, 0, 3 - ) + left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose(1, 2, 0, 3) # Right tensor: (keep, d, d, bond_dim_right) -> (d, d, keep, bond_dim_right) svh = (s[:, None] * vh).reshape(keep, phys_dim, phys_dim, bond_dim_right) @@ -1920,22 +1813,22 @@ def from_matrix( If ``rows`` is not a power of ``d``; If the inferred number of sites ``n < 1``. """ - # --- validate d --- if d <= 0: - raise ValueError(f"Physical dimension d must be > 0, got d={d}.") + msg = f"Physical dimension d must be > 0, got d={d}." + raise ValueError(msg) rows, cols = mat.shape if rows != cols: - raise ValueError("Matrix must be square for uniform MPO factorization.") + msg = "Matrix must be square for uniform MPO factorization." + raise ValueError(msg) # Special case: d == 1 if d == 1: if rows != 1: - raise ValueError( - "For d == 1 the matrix must be 1×1 since 1**n = 1 for any n." - ) + msg = "For d == 1 the matrix must be 1×1 since 1**n = 1 for any n." + raise ValueError(msg) n = 1 else: # --- infer chain length --- @@ -1943,13 +1836,14 @@ def from_matrix( n = round(n_float) if n < 1: + msg = f"Inferred chain length n={n} is invalid; matrix dimension {rows} too small for base d={d}." raise ValueError( - f"Inferred chain length n={n} is invalid; " - f"matrix dimension {rows} too small for base d={d}." + msg ) if not np.isclose(n_float, n): - raise ValueError(f"Matrix dimension {rows} is not a power of d={d}.") + msg = f"Matrix dimension {rows} is not a power of d={d}." + raise ValueError(msg) mat = np.asarray(mat, dtype=np.complex128) From 8c217c1e4e331956269b11a0c52d6061873a4619 Mon Sep 17 00:00:00 2001 From: Lukas Date: Mon, 16 Feb 2026 11:55:24 +0100 Subject: [PATCH 12/25] format issue in MPOfrom_matrix fixed --- src/mqt/yaqs/core/data_structures/networks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index f2fca552..ae1d2a4b 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -2000,7 +2000,7 @@ def _truncate(s: np.ndarray) -> int: assert mpo.check_if_valid_mpo(), "MPO initialized wrong" - return mpo + return mpo def check_if_valid_mpo(self) -> bool: """MPO validity check. 
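
A minimal usage sketch of the constructor added in this series, not taken from any of the patches themselves. It assumes the classmethod call MPO.from_matrix(mat, d, ...) with the optional max_bond and cutoff arguments visible in the hunks, and the site-tensor leg order (phys_out, phys_in, bond_left, bond_right) used by the factorization; exact argument names and defaults may differ in the merged code.

import numpy as np

from mqt.yaqs.core.data_structures.networks import MPO

# Pauli matrices for a small two-qubit test operator.
x = np.array([[0, 1], [1, 0]], dtype=complex)
z = np.array([[1, 0], [0, -1]], dtype=complex)
eye = np.eye(2, dtype=complex)

# Two-site transverse-field Ising operator: -J ZZ - g (XI + IX).
j_val, g = 1.0, 0.5
mat = -j_val * np.kron(z, z) - g * (np.kron(x, eye) + np.kron(eye, x))

# Factorize; the chain length n = 2 is inferred from the 4 x 4 shape with d = 2.
mpo = MPO.from_matrix(mat, d=2)
assert mpo.check_if_valid_mpo()

# Rebuild the dense matrix from the two site tensors (assumed leg order
# (phys_out, phys_in, bond_left, bond_right)) and compare with the input.
t0, t1 = mpo.tensors
dense = np.einsum("abij,cdjk->acbdik", t0, t1).squeeze().reshape(4, 4)
assert np.allclose(dense, mat)

With cutoff=0.0 (the default in these patches) no singular values are discarded, so the reconstruction is exact up to floating-point error; passing max_bond or a positive cutoff trades accuracy for smaller bond dimensions.
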
From 36a84e4db159a3f95fca6a1f7a18891ddd962310 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Feb 2026 10:58:09 +0000 Subject: [PATCH 13/25] =?UTF-8?q?=F0=9F=8E=A8=20pre-commit=20fixes?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/mqt/yaqs/core/data_structures/networks.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 9ad56e33..f630a010 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -1837,9 +1837,7 @@ def from_matrix( if n < 1: msg = f"Inferred chain length n={n} is invalid; matrix dimension {rows} too small for base d={d}." - raise ValueError( - msg - ) + raise ValueError(msg) if not np.isclose(n_float, n): msg = f"Matrix dimension {rows} is not a power of d={d}." From 035882bdbe1ffba0f22aebd5055e051b699bca03 Mon Sep 17 00:00:00 2001 From: Lukas Date: Mon, 16 Feb 2026 12:07:21 +0100 Subject: [PATCH 14/25] minor docstring updates in MPO.from_matrix() --- src/mqt/yaqs/core/data_structures/networks.py | 224 +++++++++++++----- 1 file changed, 165 insertions(+), 59 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 9ad56e33..e47b5931 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -174,7 +174,9 @@ def __init__( vector[0] = rng.random() vector[1] = 1 - vector[0] elif state == "basis": - assert basis_string is not None, "basis_string must be provided for 'basis' state initialization." + assert ( + basis_string is not None + ), "basis_string must be provided for 'basis' state initialization." self.init_mps_from_basis(basis_string, self.physical_dimensions) break else: @@ -191,7 +193,9 @@ def __init__( if pad is not None: self.pad_bond_dimension(pad) - def init_mps_from_basis(self, basis_string: str, physical_dimensions: list[int]) -> None: + def init_mps_from_basis( + self, basis_string: str, physical_dimensions: list[int] + ) -> None: """Initialize a list of MPS tensors representing a product state from a basis string. Args: @@ -350,8 +354,12 @@ def get_schmidt_spectrum(self, sites: list[int]) -> NDArray[np.float64]: NDArray[np.float64]: The Schmidt spectrum (length 500), with unused entries filled with NaN. """ - assert len(sites) == 2, "Schmidt spectrum is defined on a bond (two adjacent sites)." - assert sites[0] + 1 == sites[1], "Schmidt spectrum only defined for nearest-neighbor cut." + assert ( + len(sites) == 2 + ), "Schmidt spectrum is defined on a bond (two adjacent sites)." + assert ( + sites[0] + 1 == sites[1] + ), "Schmidt spectrum only defined for nearest-neighbor cut." top_schmidt_vals = 500 i, j = sites a, b = self.tensors[i], self.tensors[j] @@ -407,7 +415,9 @@ def almost_equal(self, other: MPS) -> bool: return False return True - def shift_orthogonality_center_right(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: + def shift_orthogonality_center_right( + self, current_orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Shifts orthogonality center right. 
This function performs a QR decomposition to shift the known current center to the right and move @@ -441,7 +451,9 @@ def shift_orthogonality_center_right(self, current_orthogonality_center: int, de self.tensors[current_orthogonality_center + 1], ) = (a_new, b_new) - def shift_orthogonality_center_left(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: + def shift_orthogonality_center_left( + self, current_orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Shifts orthogonality center left. This function flips the network, performs a right shift, then flips the network again. @@ -452,10 +464,14 @@ def shift_orthogonality_center_left(self, current_orthogonality_center: int, dec Default is QR. """ self.flip_network() - self.shift_orthogonality_center_right(self.length - current_orthogonality_center - 1, decomposition) + self.shift_orthogonality_center_right( + self.length - current_orthogonality_center - 1, decomposition + ) self.flip_network() - def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR") -> None: + def set_canonical_form( + self, orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Sets canonical form of MPS. Left and right normalizes an MPS around a selected site. @@ -466,7 +482,9 @@ def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR decomposition: Type of decomposition. Default QR. """ - def sweep_decomposition(orthogonality_center: int, decomposition: str = "QR") -> None: + def sweep_decomposition( + orthogonality_center: int, decomposition: str = "QR" + ) -> None: for site, _ in enumerate(self.tensors): if site == orthogonality_center: break @@ -497,13 +515,17 @@ def normalize(self, form: str = "B", decomposition: str = "QR") -> None: if form == "B": self.flip_network() - self.set_canonical_form(orthogonality_center=self.length - 1, decomposition=decomposition) + self.set_canonical_form( + orthogonality_center=self.length - 1, decomposition=decomposition + ) self.shift_orthogonality_center_right(self.length - 1, decomposition) if form == "B": self.flip_network() - def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> None: + def truncate( + self, threshold: float = 1e-12, max_bond_dim: int | None = None + ) -> None: """In-place MPS truncation via repeated two-site SVDs.""" orth_center = self.check_canonical_form()[0] if self.length == 1: @@ -525,7 +547,9 @@ def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> self.flip_network() - def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np.complex128: + def scalar_product( + self, other: MPS, sites: int | list[int] | None = None + ) -> np.complex128: """Compute the scalar (inner) product between two Matrix Product States (MPS). The function contracts the corresponding tensors of two MPS objects. 
If no specific site is @@ -552,8 +576,12 @@ def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np result = None for idx in range(self.length): # contract at each site into a 4-leg tensor - theta = oe.contract("abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx]) - result = theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) + theta = oe.contract( + "abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx] + ) + result = ( + theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) + ) # squeeze down to scalar assert result is not None return np.complex128(np.squeeze(result)) @@ -585,7 +613,9 @@ def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np msg = f"Invalid `sites` argument: {sites!r}" raise ValueError(msg) - def local_expect(self, operator: Observable, sites: int | list[int]) -> np.complex128: + def local_expect( + self, operator: Observable, sites: int | list[int] + ) -> np.complex128: """Compute the local expectation value of an operator on an MPS. The function applies the given operator to the tensor at the specified site of a deep copy of the @@ -611,11 +641,17 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl i = sites if isinstance(operator.sites, list): - assert operator.sites[0] == i, f"Operator sites mismatch {operator.sites[0]}, {i}" + assert ( + operator.sites[0] == i + ), f"Operator sites mismatch {operator.sites[0]}, {i}" elif isinstance(operator.sites, int): - assert operator.sites == i, f"Operator sites mismatch {operator.sites}, {i}" + assert ( + operator.sites == i + ), f"Operator sites mismatch {operator.sites}, {i}" - assert i is not None, f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" + assert ( + i is not None + ), f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" a = temp_state.tensors[i] temp_state.tensors[i] = oe.contract("ab, bcd->acd", operator.gate.matrix, a) @@ -626,10 +662,12 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl assert operator.sites[0] == i, "Observable sites mismatch" assert operator.sites[1] == j, "Observable sites mismatch" - assert operator.sites[0] < operator.sites[1], "Observable sites must be in ascending order." - assert operator.sites[1] - operator.sites[0] == 1, ( - "Only nearest-neighbor observables are currently implemented." - ) + assert ( + operator.sites[0] < operator.sites[1] + ), "Observable sites must be in ascending order." + assert ( + operator.sites[1] - operator.sites[0] == 1 + ), "Only nearest-neighbor observables are currently implemented." 
a = temp_state.tensors[i] b = temp_state.tensors[j] d_i, left, _ = a.shape @@ -641,7 +679,9 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl theta = theta.reshape(left, d_i * d_j, right) # (l, d_i*d_j, r) # 2) apply operator on the combined phys index - theta = oe.contract("ab, cbd->cad", operator.gate.matrix, theta) # (l, d_i*d_j, r) + theta = oe.contract( + "ab, cbd->cad", operator.gate.matrix, theta + ) # (l, d_i*d_j, r) theta = theta.reshape(left, d_i, d_j, right) # back to (l, d_i, d_j, r) # 3) split via SVD @@ -654,7 +694,9 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl u_tensor = u_mat.reshape(left, d_i, chi_new) # (l, d_i, r_new) a_new = u_tensor.transpose(1, 0, 2) # → (d_i, l, r_new) - v_tensor = (np.diag(s_vec) @ v_mat).reshape(chi_new, d_j, right) # (l_new, d_j, r) + v_tensor = (np.diag(s_vec) @ v_mat).reshape( + chi_new, d_j, right + ) # (l_new, d_j, r) b_new = v_tensor.transpose(1, 0, 2) # → (d_j, l_new, r) temp_state.tensors[i] = a_new @@ -690,24 +732,44 @@ def evaluate_observables( elif observable.gate.name == "total_bond": results[obs_index, column_index] = self.get_total_bond() elif observable.gate.name in {"entropy", "schmidt_spectrum"}: - assert isinstance(observable.sites, list), "Given metric requires a list of sites" - assert len(observable.sites) == 2, "Given metric requires 2 sites to act on." + assert isinstance( + observable.sites, list + ), "Given metric requires a list of sites" + assert ( + len(observable.sites) == 2 + ), "Given metric requires 2 sites to act on." max_site = max(observable.sites) min_site = min(observable.sites) - assert max_site - min_site == 1, "Entropy and Schmidt cuts must be nearest neighbor." + assert ( + max_site - min_site == 1 + ), "Entropy and Schmidt cuts must be nearest neighbor." for s in observable.sites: - assert s in range(self.length), f"Observable acting on non-existing site: {s}" + assert s in range( + self.length + ), f"Observable acting on non-existing site: {s}" if observable.gate.name == "entropy": - results[obs_index, column_index] = self.get_entropy(observable.sites) + results[obs_index, column_index] = self.get_entropy( + observable.sites + ) elif observable.gate.name == "schmidt_spectrum": - results[obs_index, column_index] = self.get_schmidt_spectrum(observable.sites) + results[obs_index, column_index] = self.get_schmidt_spectrum( + observable.sites + ) elif observable.gate.name == "pvm": - assert hasattr(observable.gate, "bitstring"), "Gate does not have attribute bitstring." - results[obs_index, column_index] = self.project_onto_bitstring(observable.gate.bitstring) + assert hasattr( + observable.gate, "bitstring" + ), "Gate does not have attribute bitstring." 
+ results[obs_index, column_index] = self.project_onto_bitstring( + observable.gate.bitstring + ) else: - idx = observable.sites[0] if isinstance(observable.sites, list) else observable.sites + idx = ( + observable.sites[0] + if isinstance(observable.sites, list) + else observable.sites + ) if idx > last_site: for site in range(last_site, idx): temp_state.shift_orthogonality_center_right(site) @@ -732,16 +794,24 @@ def expect(self, observable: Observable) -> np.float64: elif isinstance(observable.sites, list): sites_list = observable.sites - assert sites_list is not None, f"Invalid type in expect {type(observable.sites).__name__}" + assert ( + sites_list is not None + ), f"Invalid type in expect {type(observable.sites).__name__}" - assert len(sites_list) < 3, "Only one- and two-site observables are currently implemented." + assert ( + len(sites_list) < 3 + ), "Only one- and two-site observables are currently implemented." for s in sites_list: - assert s in range(self.length), f"Observable acting on non-existing site: {s}" + assert s in range( + self.length + ), f"Observable acting on non-existing site: {s}" exp = self.local_expect(observable, sites_list) - assert exp.imag < 1e-13, f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." + assert ( + exp.imag < 1e-13 + ), f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." return exp.real def measure_single_shot(self) -> int: @@ -758,7 +828,9 @@ def measure_single_shot(self) -> int: temp_state = copy.deepcopy(self) bitstring = [] for site, tensor in enumerate(temp_state.tensors): - reduced_density_matrix = oe.contract("abc, dbc->ad", tensor, np.conj(tensor)) + reduced_density_matrix = oe.contract( + "abc, dbc->ad", tensor, np.conj(tensor) + ) probabilities = np.diag(reduced_density_matrix).real rng = np.random.default_rng() chosen_index = rng.choice(len(probabilities), p=probabilities) @@ -772,7 +844,9 @@ def measure_single_shot(self) -> int: temp_state.tensors[site + 1] = ( # noqa: B909 1 / np.sqrt(probabilities[chosen_index]) - * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) + * oe.contract( + "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] + ) ) return sum(c << i for i, c in enumerate(bitstring)) @@ -797,10 +871,14 @@ def measure_shots(self, shots: int) -> dict[int, int]: if shots > 1: max_workers = max(1, multiprocessing.cpu_count() - 1) with ( - concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor, + concurrent.futures.ProcessPoolExecutor( + max_workers=max_workers + ) as executor, tqdm(total=shots, desc="Measuring shots", ncols=80) as pbar, ): - futures = [executor.submit(self.measure_single_shot) for _ in range(shots)] + futures = [ + executor.submit(self.measure_single_shot) for _ in range(shots) + ] for future in concurrent.futures.as_completed(futures): result = future.result() results[result] = results.get(result, 0) + 1 @@ -824,7 +902,9 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: Returns: float: Probability of obtaining the given bitstring under projective measurement. 
""" - assert len(bitstring) == self.length, "Bitstring length must match number of sites" + assert ( + len(bitstring) == self.length + ), "Bitstring length must match number of sites" temp_state = copy.deepcopy(self) total_norm = 1.0 @@ -832,7 +912,9 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: state_index = int(char) tensor = temp_state.tensors[site] local_dim = self.physical_dimensions[site] - assert 0 <= state_index < local_dim, f"Invalid state index {state_index} at site {site}" + assert ( + 0 <= state_index < local_dim + ), f"Invalid state index {state_index} at site {site}" selected_state = np.zeros(local_dim) selected_state[state_index] = 1 @@ -849,7 +931,11 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: # Normalize and propagate if site != self.length - 1: temp_state.tensors[site + 1] = ( - 1 / norm * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) + 1 + / norm + * oe.contract( + "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] + ) ) return np.complex128(total_norm**2) @@ -1366,13 +1452,20 @@ def bose_hubbard( tensor[3, 3] = id_boson # build the full tensor list - tensors = [np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) for _ in range(length)] + tensors = [ + np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) + for _ in range(length) + ] # Left boundary: take only row 0 - tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype(np.complex128) + tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype( + np.complex128 + ) # Right boundary: take only col 3 - tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype(np.complex128) + tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype( + np.complex128 + ) mpo = cls() mpo.tensors = tensors @@ -1430,7 +1523,9 @@ def finite_state_machine( self.length = len(self.tensors) self.physical_dimension = self.tensors[0].shape[0] - def custom(self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True) -> None: + def custom( + self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True + ) -> None: """Custom MPO from tensors. Initialize the custom MPO (Matrix Product Operator) with the given tensors. @@ -1498,7 +1593,9 @@ def from_pauli_sum( self.physical_dimension = physical_dimension if not terms: - self.tensors = [np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length)] + self.tensors = [ + np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length) + ] return # 1. Parse terms into dense lists of operator names. @@ -1532,7 +1629,9 @@ def from_pauli_sum( term_trajectories[t_idx][length] = 0 # bond_state_maps[i] stores the mapping: (Op_str, Next_State_ID) -> Current_State_ID - bond_state_maps: list[dict[tuple[str, int], int]] = [{} for _ in range(length + 1)] + bond_state_maps: list[dict[tuple[str, int], int]] = [ + {} for _ in range(length + 1) + ] # Sweep Right-to-Left (sites L-1 down to 1) to build the FSM transitions. # We stop at bond 1. Bond 0 is always the single "Start" state. @@ -1598,7 +1697,9 @@ def from_pauli_sum( # The FSM construction is optimal for one-sided (suffix) uniqueness. # A standard two-sweep compression ("lr_rl") puts the MPO in canonical form # and removes any remaining redundancies (e.g., common prefixes). 
- self.compress(tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl") + self.compress( + tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl" + ) assert self.check_if_valid_mpo(), "MPO initialized wrong" def compress( @@ -1647,9 +1748,13 @@ def compress( for _ in range(n_sweeps): for direction in schedule: - self._compress_one_sweep(direction=direction, tol=tol, max_bond_dim=max_bond_dim) + self._compress_one_sweep( + direction=direction, tol=tol, max_bond_dim=max_bond_dim + ) - def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | None) -> None: + def _compress_one_sweep( + self, *, direction: str, tol: float, max_bond_dim: int | None + ) -> None: """Run one in-place MPO SVD compression sweep in the given direction. Args: @@ -1699,7 +1804,9 @@ def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | vh = vh[:keep, :] # Left tensor: (bond_dim_left, d, d, keep) -> (d, d, bond_dim_left, keep) - left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose(1, 2, 0, 3) + left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose( + 1, 2, 0, 3 + ) # Right tensor: (keep, d, d, bond_dim_right) -> (d, d, keep, bond_dim_right) svh = (s[:, None] * vh).reshape(keep, phys_dim, phys_dim, bond_dim_right) @@ -1798,7 +1905,8 @@ def from_matrix( max_bond (int | None): Maximum allowed bond dimension (before truncation). cutoff (float): - Singular values ``<= cutoff`` are discarded. + Singular values ``<= cutoff`` are discarded. By default all singular + values are included. Returns: MPO: @@ -1808,7 +1916,7 @@ def from_matrix( Raises: ValueError: If ``d <= 0``; - If ``d == 1`` but the matrix is not ``1 × 1``; + If ``d == 1`` but the matrix is not ``1 x 1``; If the matrix is not square; If ``rows`` is not a power of ``d``; If the inferred number of sites ``n < 1``. @@ -1837,9 +1945,7 @@ def from_matrix( if n < 1: msg = f"Inferred chain length n={n} is invalid; matrix dimension {rows} too small for base d={d}." - raise ValueError( - msg - ) + raise ValueError(msg) if not np.isclose(n_float, n): msg = f"Matrix dimension {rows} is not a power of d={d}." From 492dbdb62606270d3d5e760e4eef8af1cbcc2ff3 Mon Sep 17 00:00:00 2001 From: Lukas Date: Mon, 16 Feb 2026 13:46:12 +0100 Subject: [PATCH 15/25] better docstring MPO.from_matrix --- src/mqt/yaqs/core/data_structures/networks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index e47b5931..1422c74f 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -1905,7 +1905,7 @@ def from_matrix( max_bond (int | None): Maximum allowed bond dimension (before truncation). cutoff (float): - Singular values ``<= cutoff`` are discarded. By default all singular + Singular values ``<= cutoff`` are discarded. By default cutoff=0: all singular values are included. 
Returns: @@ -1960,7 +1960,7 @@ def from_matrix( def _truncate(s: np.ndarray) -> int: r = s.size - if cutoff > 0.0: + if cutoff >= 0.0: r = max(int(np.sum(s > cutoff)), 1) if max_bond is not None: r = min(r, max_bond) From c7e9bbd26c5a41cdaff049f54df2d0a32c881023 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Feb 2026 12:48:14 +0000 Subject: [PATCH 16/25] =?UTF-8?q?=F0=9F=8E=A8=20pre-commit=20fixes?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/mqt/yaqs/core/data_structures/networks.py | 215 +++++------------- 1 file changed, 54 insertions(+), 161 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 1422c74f..64815fd4 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -174,9 +174,7 @@ def __init__( vector[0] = rng.random() vector[1] = 1 - vector[0] elif state == "basis": - assert ( - basis_string is not None - ), "basis_string must be provided for 'basis' state initialization." + assert basis_string is not None, "basis_string must be provided for 'basis' state initialization." self.init_mps_from_basis(basis_string, self.physical_dimensions) break else: @@ -193,9 +191,7 @@ def __init__( if pad is not None: self.pad_bond_dimension(pad) - def init_mps_from_basis( - self, basis_string: str, physical_dimensions: list[int] - ) -> None: + def init_mps_from_basis(self, basis_string: str, physical_dimensions: list[int]) -> None: """Initialize a list of MPS tensors representing a product state from a basis string. Args: @@ -354,12 +350,8 @@ def get_schmidt_spectrum(self, sites: list[int]) -> NDArray[np.float64]: NDArray[np.float64]: The Schmidt spectrum (length 500), with unused entries filled with NaN. """ - assert ( - len(sites) == 2 - ), "Schmidt spectrum is defined on a bond (two adjacent sites)." - assert ( - sites[0] + 1 == sites[1] - ), "Schmidt spectrum only defined for nearest-neighbor cut." + assert len(sites) == 2, "Schmidt spectrum is defined on a bond (two adjacent sites)." + assert sites[0] + 1 == sites[1], "Schmidt spectrum only defined for nearest-neighbor cut." top_schmidt_vals = 500 i, j = sites a, b = self.tensors[i], self.tensors[j] @@ -415,9 +407,7 @@ def almost_equal(self, other: MPS) -> bool: return False return True - def shift_orthogonality_center_right( - self, current_orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def shift_orthogonality_center_right(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: """Shifts orthogonality center right. This function performs a QR decomposition to shift the known current center to the right and move @@ -451,9 +441,7 @@ def shift_orthogonality_center_right( self.tensors[current_orthogonality_center + 1], ) = (a_new, b_new) - def shift_orthogonality_center_left( - self, current_orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def shift_orthogonality_center_left(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: """Shifts orthogonality center left. This function flips the network, performs a right shift, then flips the network again. @@ -464,14 +452,10 @@ def shift_orthogonality_center_left( Default is QR. 
""" self.flip_network() - self.shift_orthogonality_center_right( - self.length - current_orthogonality_center - 1, decomposition - ) + self.shift_orthogonality_center_right(self.length - current_orthogonality_center - 1, decomposition) self.flip_network() - def set_canonical_form( - self, orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR") -> None: """Sets canonical form of MPS. Left and right normalizes an MPS around a selected site. @@ -482,9 +466,7 @@ def set_canonical_form( decomposition: Type of decomposition. Default QR. """ - def sweep_decomposition( - orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def sweep_decomposition(orthogonality_center: int, decomposition: str = "QR") -> None: for site, _ in enumerate(self.tensors): if site == orthogonality_center: break @@ -515,17 +497,13 @@ def normalize(self, form: str = "B", decomposition: str = "QR") -> None: if form == "B": self.flip_network() - self.set_canonical_form( - orthogonality_center=self.length - 1, decomposition=decomposition - ) + self.set_canonical_form(orthogonality_center=self.length - 1, decomposition=decomposition) self.shift_orthogonality_center_right(self.length - 1, decomposition) if form == "B": self.flip_network() - def truncate( - self, threshold: float = 1e-12, max_bond_dim: int | None = None - ) -> None: + def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> None: """In-place MPS truncation via repeated two-site SVDs.""" orth_center = self.check_canonical_form()[0] if self.length == 1: @@ -547,9 +525,7 @@ def truncate( self.flip_network() - def scalar_product( - self, other: MPS, sites: int | list[int] | None = None - ) -> np.complex128: + def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np.complex128: """Compute the scalar (inner) product between two Matrix Product States (MPS). The function contracts the corresponding tensors of two MPS objects. If no specific site is @@ -576,12 +552,8 @@ def scalar_product( result = None for idx in range(self.length): # contract at each site into a 4-leg tensor - theta = oe.contract( - "abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx] - ) - result = ( - theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) - ) + theta = oe.contract("abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx]) + result = theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) # squeeze down to scalar assert result is not None return np.complex128(np.squeeze(result)) @@ -613,9 +585,7 @@ def scalar_product( msg = f"Invalid `sites` argument: {sites!r}" raise ValueError(msg) - def local_expect( - self, operator: Observable, sites: int | list[int] - ) -> np.complex128: + def local_expect(self, operator: Observable, sites: int | list[int]) -> np.complex128: """Compute the local expectation value of an operator on an MPS. 
The function applies the given operator to the tensor at the specified site of a deep copy of the @@ -641,17 +611,11 @@ def local_expect( i = sites if isinstance(operator.sites, list): - assert ( - operator.sites[0] == i - ), f"Operator sites mismatch {operator.sites[0]}, {i}" + assert operator.sites[0] == i, f"Operator sites mismatch {operator.sites[0]}, {i}" elif isinstance(operator.sites, int): - assert ( - operator.sites == i - ), f"Operator sites mismatch {operator.sites}, {i}" + assert operator.sites == i, f"Operator sites mismatch {operator.sites}, {i}" - assert ( - i is not None - ), f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" + assert i is not None, f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" a = temp_state.tensors[i] temp_state.tensors[i] = oe.contract("ab, bcd->acd", operator.gate.matrix, a) @@ -662,12 +626,10 @@ def local_expect( assert operator.sites[0] == i, "Observable sites mismatch" assert operator.sites[1] == j, "Observable sites mismatch" - assert ( - operator.sites[0] < operator.sites[1] - ), "Observable sites must be in ascending order." - assert ( - operator.sites[1] - operator.sites[0] == 1 - ), "Only nearest-neighbor observables are currently implemented." + assert operator.sites[0] < operator.sites[1], "Observable sites must be in ascending order." + assert operator.sites[1] - operator.sites[0] == 1, ( + "Only nearest-neighbor observables are currently implemented." + ) a = temp_state.tensors[i] b = temp_state.tensors[j] d_i, left, _ = a.shape @@ -679,9 +641,7 @@ def local_expect( theta = theta.reshape(left, d_i * d_j, right) # (l, d_i*d_j, r) # 2) apply operator on the combined phys index - theta = oe.contract( - "ab, cbd->cad", operator.gate.matrix, theta - ) # (l, d_i*d_j, r) + theta = oe.contract("ab, cbd->cad", operator.gate.matrix, theta) # (l, d_i*d_j, r) theta = theta.reshape(left, d_i, d_j, right) # back to (l, d_i, d_j, r) # 3) split via SVD @@ -694,9 +654,7 @@ def local_expect( u_tensor = u_mat.reshape(left, d_i, chi_new) # (l, d_i, r_new) a_new = u_tensor.transpose(1, 0, 2) # → (d_i, l, r_new) - v_tensor = (np.diag(s_vec) @ v_mat).reshape( - chi_new, d_j, right - ) # (l_new, d_j, r) + v_tensor = (np.diag(s_vec) @ v_mat).reshape(chi_new, d_j, right) # (l_new, d_j, r) b_new = v_tensor.transpose(1, 0, 2) # → (d_j, l_new, r) temp_state.tensors[i] = a_new @@ -732,44 +690,24 @@ def evaluate_observables( elif observable.gate.name == "total_bond": results[obs_index, column_index] = self.get_total_bond() elif observable.gate.name in {"entropy", "schmidt_spectrum"}: - assert isinstance( - observable.sites, list - ), "Given metric requires a list of sites" - assert ( - len(observable.sites) == 2 - ), "Given metric requires 2 sites to act on." + assert isinstance(observable.sites, list), "Given metric requires a list of sites" + assert len(observable.sites) == 2, "Given metric requires 2 sites to act on." max_site = max(observable.sites) min_site = min(observable.sites) - assert ( - max_site - min_site == 1 - ), "Entropy and Schmidt cuts must be nearest neighbor." + assert max_site - min_site == 1, "Entropy and Schmidt cuts must be nearest neighbor." 
for s in observable.sites: - assert s in range( - self.length - ), f"Observable acting on non-existing site: {s}" + assert s in range(self.length), f"Observable acting on non-existing site: {s}" if observable.gate.name == "entropy": - results[obs_index, column_index] = self.get_entropy( - observable.sites - ) + results[obs_index, column_index] = self.get_entropy(observable.sites) elif observable.gate.name == "schmidt_spectrum": - results[obs_index, column_index] = self.get_schmidt_spectrum( - observable.sites - ) + results[obs_index, column_index] = self.get_schmidt_spectrum(observable.sites) elif observable.gate.name == "pvm": - assert hasattr( - observable.gate, "bitstring" - ), "Gate does not have attribute bitstring." - results[obs_index, column_index] = self.project_onto_bitstring( - observable.gate.bitstring - ) + assert hasattr(observable.gate, "bitstring"), "Gate does not have attribute bitstring." + results[obs_index, column_index] = self.project_onto_bitstring(observable.gate.bitstring) else: - idx = ( - observable.sites[0] - if isinstance(observable.sites, list) - else observable.sites - ) + idx = observable.sites[0] if isinstance(observable.sites, list) else observable.sites if idx > last_site: for site in range(last_site, idx): temp_state.shift_orthogonality_center_right(site) @@ -794,24 +732,16 @@ def expect(self, observable: Observable) -> np.float64: elif isinstance(observable.sites, list): sites_list = observable.sites - assert ( - sites_list is not None - ), f"Invalid type in expect {type(observable.sites).__name__}" + assert sites_list is not None, f"Invalid type in expect {type(observable.sites).__name__}" - assert ( - len(sites_list) < 3 - ), "Only one- and two-site observables are currently implemented." + assert len(sites_list) < 3, "Only one- and two-site observables are currently implemented." for s in sites_list: - assert s in range( - self.length - ), f"Observable acting on non-existing site: {s}" + assert s in range(self.length), f"Observable acting on non-existing site: {s}" exp = self.local_expect(observable, sites_list) - assert ( - exp.imag < 1e-13 - ), f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." + assert exp.imag < 1e-13, f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." 
return exp.real def measure_single_shot(self) -> int: @@ -828,9 +758,7 @@ def measure_single_shot(self) -> int: temp_state = copy.deepcopy(self) bitstring = [] for site, tensor in enumerate(temp_state.tensors): - reduced_density_matrix = oe.contract( - "abc, dbc->ad", tensor, np.conj(tensor) - ) + reduced_density_matrix = oe.contract("abc, dbc->ad", tensor, np.conj(tensor)) probabilities = np.diag(reduced_density_matrix).real rng = np.random.default_rng() chosen_index = rng.choice(len(probabilities), p=probabilities) @@ -844,9 +772,7 @@ def measure_single_shot(self) -> int: temp_state.tensors[site + 1] = ( # noqa: B909 1 / np.sqrt(probabilities[chosen_index]) - * oe.contract( - "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] - ) + * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) ) return sum(c << i for i, c in enumerate(bitstring)) @@ -871,14 +797,10 @@ def measure_shots(self, shots: int) -> dict[int, int]: if shots > 1: max_workers = max(1, multiprocessing.cpu_count() - 1) with ( - concurrent.futures.ProcessPoolExecutor( - max_workers=max_workers - ) as executor, + concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor, tqdm(total=shots, desc="Measuring shots", ncols=80) as pbar, ): - futures = [ - executor.submit(self.measure_single_shot) for _ in range(shots) - ] + futures = [executor.submit(self.measure_single_shot) for _ in range(shots)] for future in concurrent.futures.as_completed(futures): result = future.result() results[result] = results.get(result, 0) + 1 @@ -902,9 +824,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: Returns: float: Probability of obtaining the given bitstring under projective measurement. """ - assert ( - len(bitstring) == self.length - ), "Bitstring length must match number of sites" + assert len(bitstring) == self.length, "Bitstring length must match number of sites" temp_state = copy.deepcopy(self) total_norm = 1.0 @@ -912,9 +832,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: state_index = int(char) tensor = temp_state.tensors[site] local_dim = self.physical_dimensions[site] - assert ( - 0 <= state_index < local_dim - ), f"Invalid state index {state_index} at site {site}" + assert 0 <= state_index < local_dim, f"Invalid state index {state_index} at site {site}" selected_state = np.zeros(local_dim) selected_state[state_index] = 1 @@ -931,11 +849,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: # Normalize and propagate if site != self.length - 1: temp_state.tensors[site + 1] = ( - 1 - / norm - * oe.contract( - "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] - ) + 1 / norm * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) ) return np.complex128(total_norm**2) @@ -1452,20 +1366,13 @@ def bose_hubbard( tensor[3, 3] = id_boson # build the full tensor list - tensors = [ - np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) - for _ in range(length) - ] + tensors = [np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) for _ in range(length)] # Left boundary: take only row 0 - tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype( - np.complex128 - ) + tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype(np.complex128) # Right boundary: take only col 3 - tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype( - np.complex128 - ) + tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 
3:4].astype(np.complex128) mpo = cls() mpo.tensors = tensors @@ -1523,9 +1430,7 @@ def finite_state_machine( self.length = len(self.tensors) self.physical_dimension = self.tensors[0].shape[0] - def custom( - self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True - ) -> None: + def custom(self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True) -> None: """Custom MPO from tensors. Initialize the custom MPO (Matrix Product Operator) with the given tensors. @@ -1593,9 +1498,7 @@ def from_pauli_sum( self.physical_dimension = physical_dimension if not terms: - self.tensors = [ - np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length) - ] + self.tensors = [np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length)] return # 1. Parse terms into dense lists of operator names. @@ -1629,9 +1532,7 @@ def from_pauli_sum( term_trajectories[t_idx][length] = 0 # bond_state_maps[i] stores the mapping: (Op_str, Next_State_ID) -> Current_State_ID - bond_state_maps: list[dict[tuple[str, int], int]] = [ - {} for _ in range(length + 1) - ] + bond_state_maps: list[dict[tuple[str, int], int]] = [{} for _ in range(length + 1)] # Sweep Right-to-Left (sites L-1 down to 1) to build the FSM transitions. # We stop at bond 1. Bond 0 is always the single "Start" state. @@ -1697,9 +1598,7 @@ def from_pauli_sum( # The FSM construction is optimal for one-sided (suffix) uniqueness. # A standard two-sweep compression ("lr_rl") puts the MPO in canonical form # and removes any remaining redundancies (e.g., common prefixes). - self.compress( - tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl" - ) + self.compress(tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl") assert self.check_if_valid_mpo(), "MPO initialized wrong" def compress( @@ -1748,13 +1647,9 @@ def compress( for _ in range(n_sweeps): for direction in schedule: - self._compress_one_sweep( - direction=direction, tol=tol, max_bond_dim=max_bond_dim - ) + self._compress_one_sweep(direction=direction, tol=tol, max_bond_dim=max_bond_dim) - def _compress_one_sweep( - self, *, direction: str, tol: float, max_bond_dim: int | None - ) -> None: + def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | None) -> None: """Run one in-place MPO SVD compression sweep in the given direction. Args: @@ -1804,9 +1699,7 @@ def _compress_one_sweep( vh = vh[:keep, :] # Left tensor: (bond_dim_left, d, d, keep) -> (d, d, bond_dim_left, keep) - left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose( - 1, 2, 0, 3 - ) + left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose(1, 2, 0, 3) # Right tensor: (keep, d, d, bond_dim_right) -> (d, d, keep, bond_dim_right) svh = (s[:, None] * vh).reshape(keep, phys_dim, phys_dim, bond_dim_right) From ee4d257b2ccd5738d3f4f7ccf150a7f43f0e4218 Mon Sep 17 00:00:00 2001 From: Lukas Date: Mon, 16 Feb 2026 14:10:18 +0100 Subject: [PATCH 17/25] docstring --- src/mqt/yaqs/core/data_structures/networks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 1422c74f..340a10e1 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -1905,7 +1905,7 @@ def from_matrix( max_bond (int | None): Maximum allowed bond dimension (before truncation). cutoff (float): - Singular values ``<= cutoff`` are discarded. 
By default cutoff=0: all singular + Singular values ``<= cutoff`` are discarded. By default cutoff=0: all non-zero singular values are included. Returns: @@ -1960,7 +1960,7 @@ def from_matrix( def _truncate(s: np.ndarray) -> int: r = s.size - if cutoff >= 0.0: + if cutoff > 0.0: r = max(int(np.sum(s > cutoff)), 1) if max_bond is not None: r = min(r, max_bond) From 674c92beab32b253ab806ffd995b8f87ef93000d Mon Sep 17 00:00:00 2001 From: Lukas Date: Mon, 16 Feb 2026 15:46:33 +0100 Subject: [PATCH 18/25] increased codecov --- src/mqt/yaqs/core/data_structures/networks.py | 272 +++++++++++------- tests/core/data_structures/test_networks.py | 69 ++++- 2 files changed, 229 insertions(+), 112 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 1ebfef72..cf24f0f4 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -174,7 +174,9 @@ def __init__( vector[0] = rng.random() vector[1] = 1 - vector[0] elif state == "basis": - assert basis_string is not None, "basis_string must be provided for 'basis' state initialization." + assert ( + basis_string is not None + ), "basis_string must be provided for 'basis' state initialization." self.init_mps_from_basis(basis_string, self.physical_dimensions) break else: @@ -191,7 +193,9 @@ def __init__( if pad is not None: self.pad_bond_dimension(pad) - def init_mps_from_basis(self, basis_string: str, physical_dimensions: list[int]) -> None: + def init_mps_from_basis( + self, basis_string: str, physical_dimensions: list[int] + ) -> None: """Initialize a list of MPS tensors representing a product state from a basis string. Args: @@ -350,8 +354,12 @@ def get_schmidt_spectrum(self, sites: list[int]) -> NDArray[np.float64]: NDArray[np.float64]: The Schmidt spectrum (length 500), with unused entries filled with NaN. """ - assert len(sites) == 2, "Schmidt spectrum is defined on a bond (two adjacent sites)." - assert sites[0] + 1 == sites[1], "Schmidt spectrum only defined for nearest-neighbor cut." + assert ( + len(sites) == 2 + ), "Schmidt spectrum is defined on a bond (two adjacent sites)." + assert ( + sites[0] + 1 == sites[1] + ), "Schmidt spectrum only defined for nearest-neighbor cut." top_schmidt_vals = 500 i, j = sites a, b = self.tensors[i], self.tensors[j] @@ -407,7 +415,9 @@ def almost_equal(self, other: MPS) -> bool: return False return True - def shift_orthogonality_center_right(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: + def shift_orthogonality_center_right( + self, current_orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Shifts orthogonality center right. This function performs a QR decomposition to shift the known current center to the right and move @@ -441,7 +451,9 @@ def shift_orthogonality_center_right(self, current_orthogonality_center: int, de self.tensors[current_orthogonality_center + 1], ) = (a_new, b_new) - def shift_orthogonality_center_left(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: + def shift_orthogonality_center_left( + self, current_orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Shifts orthogonality center left. This function flips the network, performs a right shift, then flips the network again. @@ -452,10 +464,14 @@ def shift_orthogonality_center_left(self, current_orthogonality_center: int, dec Default is QR. 
""" self.flip_network() - self.shift_orthogonality_center_right(self.length - current_orthogonality_center - 1, decomposition) + self.shift_orthogonality_center_right( + self.length - current_orthogonality_center - 1, decomposition + ) self.flip_network() - def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR") -> None: + def set_canonical_form( + self, orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Sets canonical form of MPS. Left and right normalizes an MPS around a selected site. @@ -466,7 +482,9 @@ def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR decomposition: Type of decomposition. Default QR. """ - def sweep_decomposition(orthogonality_center: int, decomposition: str = "QR") -> None: + def sweep_decomposition( + orthogonality_center: int, decomposition: str = "QR" + ) -> None: for site, _ in enumerate(self.tensors): if site == orthogonality_center: break @@ -497,13 +515,17 @@ def normalize(self, form: str = "B", decomposition: str = "QR") -> None: if form == "B": self.flip_network() - self.set_canonical_form(orthogonality_center=self.length - 1, decomposition=decomposition) + self.set_canonical_form( + orthogonality_center=self.length - 1, decomposition=decomposition + ) self.shift_orthogonality_center_right(self.length - 1, decomposition) if form == "B": self.flip_network() - def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> None: + def truncate( + self, threshold: float = 1e-12, max_bond_dim: int | None = None + ) -> None: """In-place MPS truncation via repeated two-site SVDs.""" orth_center = self.check_canonical_form()[0] if self.length == 1: @@ -525,7 +547,9 @@ def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> self.flip_network() - def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np.complex128: + def scalar_product( + self, other: MPS, sites: int | list[int] | None = None + ) -> np.complex128: """Compute the scalar (inner) product between two Matrix Product States (MPS). The function contracts the corresponding tensors of two MPS objects. If no specific site is @@ -552,8 +576,12 @@ def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np result = None for idx in range(self.length): # contract at each site into a 4-leg tensor - theta = oe.contract("abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx]) - result = theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) + theta = oe.contract( + "abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx] + ) + result = ( + theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) + ) # squeeze down to scalar assert result is not None return np.complex128(np.squeeze(result)) @@ -585,7 +613,9 @@ def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np msg = f"Invalid `sites` argument: {sites!r}" raise ValueError(msg) - def local_expect(self, operator: Observable, sites: int | list[int]) -> np.complex128: + def local_expect( + self, operator: Observable, sites: int | list[int] + ) -> np.complex128: """Compute the local expectation value of an operator on an MPS. 
The function applies the given operator to the tensor at the specified site of a deep copy of the @@ -611,11 +641,17 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl i = sites if isinstance(operator.sites, list): - assert operator.sites[0] == i, f"Operator sites mismatch {operator.sites[0]}, {i}" + assert ( + operator.sites[0] == i + ), f"Operator sites mismatch {operator.sites[0]}, {i}" elif isinstance(operator.sites, int): - assert operator.sites == i, f"Operator sites mismatch {operator.sites}, {i}" + assert ( + operator.sites == i + ), f"Operator sites mismatch {operator.sites}, {i}" - assert i is not None, f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" + assert ( + i is not None + ), f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" a = temp_state.tensors[i] temp_state.tensors[i] = oe.contract("ab, bcd->acd", operator.gate.matrix, a) @@ -626,10 +662,12 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl assert operator.sites[0] == i, "Observable sites mismatch" assert operator.sites[1] == j, "Observable sites mismatch" - assert operator.sites[0] < operator.sites[1], "Observable sites must be in ascending order." - assert operator.sites[1] - operator.sites[0] == 1, ( - "Only nearest-neighbor observables are currently implemented." - ) + assert ( + operator.sites[0] < operator.sites[1] + ), "Observable sites must be in ascending order." + assert ( + operator.sites[1] - operator.sites[0] == 1 + ), "Only nearest-neighbor observables are currently implemented." a = temp_state.tensors[i] b = temp_state.tensors[j] d_i, left, _ = a.shape @@ -641,7 +679,9 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl theta = theta.reshape(left, d_i * d_j, right) # (l, d_i*d_j, r) # 2) apply operator on the combined phys index - theta = oe.contract("ab, cbd->cad", operator.gate.matrix, theta) # (l, d_i*d_j, r) + theta = oe.contract( + "ab, cbd->cad", operator.gate.matrix, theta + ) # (l, d_i*d_j, r) theta = theta.reshape(left, d_i, d_j, right) # back to (l, d_i, d_j, r) # 3) split via SVD @@ -654,7 +694,9 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl u_tensor = u_mat.reshape(left, d_i, chi_new) # (l, d_i, r_new) a_new = u_tensor.transpose(1, 0, 2) # → (d_i, l, r_new) - v_tensor = (np.diag(s_vec) @ v_mat).reshape(chi_new, d_j, right) # (l_new, d_j, r) + v_tensor = (np.diag(s_vec) @ v_mat).reshape( + chi_new, d_j, right + ) # (l_new, d_j, r) b_new = v_tensor.transpose(1, 0, 2) # → (d_j, l_new, r) temp_state.tensors[i] = a_new @@ -690,24 +732,44 @@ def evaluate_observables( elif observable.gate.name == "total_bond": results[obs_index, column_index] = self.get_total_bond() elif observable.gate.name in {"entropy", "schmidt_spectrum"}: - assert isinstance(observable.sites, list), "Given metric requires a list of sites" - assert len(observable.sites) == 2, "Given metric requires 2 sites to act on." + assert isinstance( + observable.sites, list + ), "Given metric requires a list of sites" + assert ( + len(observable.sites) == 2 + ), "Given metric requires 2 sites to act on." max_site = max(observable.sites) min_site = min(observable.sites) - assert max_site - min_site == 1, "Entropy and Schmidt cuts must be nearest neighbor." + assert ( + max_site - min_site == 1 + ), "Entropy and Schmidt cuts must be nearest neighbor." 
for s in observable.sites: - assert s in range(self.length), f"Observable acting on non-existing site: {s}" + assert s in range( + self.length + ), f"Observable acting on non-existing site: {s}" if observable.gate.name == "entropy": - results[obs_index, column_index] = self.get_entropy(observable.sites) + results[obs_index, column_index] = self.get_entropy( + observable.sites + ) elif observable.gate.name == "schmidt_spectrum": - results[obs_index, column_index] = self.get_schmidt_spectrum(observable.sites) + results[obs_index, column_index] = self.get_schmidt_spectrum( + observable.sites + ) elif observable.gate.name == "pvm": - assert hasattr(observable.gate, "bitstring"), "Gate does not have attribute bitstring." - results[obs_index, column_index] = self.project_onto_bitstring(observable.gate.bitstring) + assert hasattr( + observable.gate, "bitstring" + ), "Gate does not have attribute bitstring." + results[obs_index, column_index] = self.project_onto_bitstring( + observable.gate.bitstring + ) else: - idx = observable.sites[0] if isinstance(observable.sites, list) else observable.sites + idx = ( + observable.sites[0] + if isinstance(observable.sites, list) + else observable.sites + ) if idx > last_site: for site in range(last_site, idx): temp_state.shift_orthogonality_center_right(site) @@ -732,16 +794,24 @@ def expect(self, observable: Observable) -> np.float64: elif isinstance(observable.sites, list): sites_list = observable.sites - assert sites_list is not None, f"Invalid type in expect {type(observable.sites).__name__}" + assert ( + sites_list is not None + ), f"Invalid type in expect {type(observable.sites).__name__}" - assert len(sites_list) < 3, "Only one- and two-site observables are currently implemented." + assert ( + len(sites_list) < 3 + ), "Only one- and two-site observables are currently implemented." for s in sites_list: - assert s in range(self.length), f"Observable acting on non-existing site: {s}" + assert s in range( + self.length + ), f"Observable acting on non-existing site: {s}" exp = self.local_expect(observable, sites_list) - assert exp.imag < 1e-13, f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." + assert ( + exp.imag < 1e-13 + ), f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." 
return exp.real def measure_single_shot(self) -> int: @@ -758,7 +828,9 @@ def measure_single_shot(self) -> int: temp_state = copy.deepcopy(self) bitstring = [] for site, tensor in enumerate(temp_state.tensors): - reduced_density_matrix = oe.contract("abc, dbc->ad", tensor, np.conj(tensor)) + reduced_density_matrix = oe.contract( + "abc, dbc->ad", tensor, np.conj(tensor) + ) probabilities = np.diag(reduced_density_matrix).real rng = np.random.default_rng() chosen_index = rng.choice(len(probabilities), p=probabilities) @@ -772,7 +844,9 @@ def measure_single_shot(self) -> int: temp_state.tensors[site + 1] = ( # noqa: B909 1 / np.sqrt(probabilities[chosen_index]) - * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) + * oe.contract( + "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] + ) ) return sum(c << i for i, c in enumerate(bitstring)) @@ -797,10 +871,14 @@ def measure_shots(self, shots: int) -> dict[int, int]: if shots > 1: max_workers = max(1, multiprocessing.cpu_count() - 1) with ( - concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor, + concurrent.futures.ProcessPoolExecutor( + max_workers=max_workers + ) as executor, tqdm(total=shots, desc="Measuring shots", ncols=80) as pbar, ): - futures = [executor.submit(self.measure_single_shot) for _ in range(shots)] + futures = [ + executor.submit(self.measure_single_shot) for _ in range(shots) + ] for future in concurrent.futures.as_completed(futures): result = future.result() results[result] = results.get(result, 0) + 1 @@ -824,7 +902,9 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: Returns: float: Probability of obtaining the given bitstring under projective measurement. """ - assert len(bitstring) == self.length, "Bitstring length must match number of sites" + assert ( + len(bitstring) == self.length + ), "Bitstring length must match number of sites" temp_state = copy.deepcopy(self) total_norm = 1.0 @@ -832,7 +912,9 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: state_index = int(char) tensor = temp_state.tensors[site] local_dim = self.physical_dimensions[site] - assert 0 <= state_index < local_dim, f"Invalid state index {state_index} at site {site}" + assert ( + 0 <= state_index < local_dim + ), f"Invalid state index {state_index} at site {site}" selected_state = np.zeros(local_dim) selected_state[state_index] = 1 @@ -849,7 +931,11 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: # Normalize and propagate if site != self.length - 1: temp_state.tensors[site + 1] = ( - 1 / norm * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) + 1 + / norm + * oe.contract( + "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] + ) ) return np.complex128(total_norm**2) @@ -1366,13 +1452,20 @@ def bose_hubbard( tensor[3, 3] = id_boson # build the full tensor list - tensors = [np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) for _ in range(length)] + tensors = [ + np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) + for _ in range(length) + ] # Left boundary: take only row 0 - tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype(np.complex128) + tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype( + np.complex128 + ) # Right boundary: take only col 3 - tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype(np.complex128) + tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype( 
+ np.complex128 + ) mpo = cls() mpo.tensors = tensors @@ -1430,7 +1523,9 @@ def finite_state_machine( self.length = len(self.tensors) self.physical_dimension = self.tensors[0].shape[0] - def custom(self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True) -> None: + def custom( + self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True + ) -> None: """Custom MPO from tensors. Initialize the custom MPO (Matrix Product Operator) with the given tensors. @@ -1498,7 +1593,9 @@ def from_pauli_sum( self.physical_dimension = physical_dimension if not terms: - self.tensors = [np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length)] + self.tensors = [ + np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length) + ] return # 1. Parse terms into dense lists of operator names. @@ -1532,7 +1629,9 @@ def from_pauli_sum( term_trajectories[t_idx][length] = 0 # bond_state_maps[i] stores the mapping: (Op_str, Next_State_ID) -> Current_State_ID - bond_state_maps: list[dict[tuple[str, int], int]] = [{} for _ in range(length + 1)] + bond_state_maps: list[dict[tuple[str, int], int]] = [ + {} for _ in range(length + 1) + ] # Sweep Right-to-Left (sites L-1 down to 1) to build the FSM transitions. # We stop at bond 1. Bond 0 is always the single "Start" state. @@ -1598,7 +1697,9 @@ def from_pauli_sum( # The FSM construction is optimal for one-sided (suffix) uniqueness. # A standard two-sweep compression ("lr_rl") puts the MPO in canonical form # and removes any remaining redundancies (e.g., common prefixes). - self.compress(tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl") + self.compress( + tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl" + ) assert self.check_if_valid_mpo(), "MPO initialized wrong" def compress( @@ -1647,9 +1748,13 @@ def compress( for _ in range(n_sweeps): for direction in schedule: - self._compress_one_sweep(direction=direction, tol=tol, max_bond_dim=max_bond_dim) + self._compress_one_sweep( + direction=direction, tol=tol, max_bond_dim=max_bond_dim + ) - def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | None) -> None: + def _compress_one_sweep( + self, *, direction: str, tol: float, max_bond_dim: int | None + ) -> None: """Run one in-place MPO SVD compression sweep in the given direction. Args: @@ -1699,7 +1804,9 @@ def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | vh = vh[:keep, :] # Left tensor: (bond_dim_left, d, d, keep) -> (d, d, bond_dim_left, keep) - left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose(1, 2, 0, 3) + left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose( + 1, 2, 0, 3 + ) # Right tensor: (keep, d, d, bond_dim_right) -> (d, d, keep, bond_dim_right) svh = (s[:, None] * vh).reshape(keep, phys_dim, phys_dim, bond_dim_right) @@ -1783,38 +1890,7 @@ def from_matrix( max_bond: int | None = None, cutoff: float = 0.0, ) -> MPO: - """Factorize a dense matrix into an MPO with uniform local dimension ``d``. - - Each site has local shape ``(d, d)``. - The number of sites ``n`` is inferred from the relation: - - mat.shape = (d**n, d**n) - - Args: - mat (np.ndarray): - Square matrix of shape ``(d**n, d**n)``. - d (int): - Physical dimension per site. Must satisfy ``d > 0``. - max_bond (int | None): - Maximum allowed bond dimension (before truncation). - cutoff (float): - Singular values ``<= cutoff`` are discarded. By default cutoff=0: all non-zero singular - values are included. 
- - Returns: - MPO: - An MPO with ``n`` sites, uniform physical dimension ``d`` per site, - and bond dimensions determined by SVD truncation. - - Raises: - ValueError: - If ``d <= 0``; - If ``d == 1`` but the matrix is not ``1 x 1``; - If the matrix is not square; - If ``rows`` is not a power of ``d``; - If the inferred number of sites ``n < 1``. - """ - # --- validate d --- + """Factorize a dense matrix into an MPO with uniform local dimension ``d``.""" if d <= 0: msg = f"Physical dimension d must be > 0, got d={d}." raise ValueError(msg) @@ -1825,14 +1901,12 @@ def from_matrix( msg = "Matrix must be square for uniform MPO factorization." raise ValueError(msg) - # Special case: d == 1 if d == 1: if rows != 1: - msg = "For d == 1 the matrix must be 1×1 since 1**n = 1 for any n." + msg = "For d == 1 the matrix must be 1x1 since 1**n = 1 for any n." raise ValueError(msg) n = 1 else: - # --- infer chain length --- n_float = np.log(rows) / np.log(d) n = round(n_float) @@ -1859,32 +1933,30 @@ def _truncate(s: np.ndarray) -> int: r = min(r, max_bond) return r - # SVD sweep for k in range(n - 1): rest = d ** (n - k - 1) rem = rem.reshape(left_rank, d, rest, d, rest) rem_perm = np.transpose(rem, (1, 3, 0, 2, 4)) - X = rem_perm.reshape(d * d * left_rank, rest * rest) + x = rem_perm.reshape(d * d * left_rank, rest * rest) - U, s, Vh = np.linalg.svd(X, full_matrices=False) + u, s, vh = np.linalg.svd(x, full_matrices=False) r_keep = _truncate(s) - U = U[:, :r_keep] + u = u[:, :r_keep] s = s[:r_keep] - Vh = Vh[:r_keep, :] + vh = vh[:r_keep, :] - T_k = U.reshape(d, d, left_rank, r_keep) - tensors.append(T_k) + t_k = u.reshape(d, d, left_rank, r_keep) + tensors.append(t_k) - rem = (s[:, None] * Vh).reshape(r_keep, rest, rest) + rem = (s[:, None] * vh).reshape(r_keep, rest, rest) left_rank = r_keep - # last site rem = rem.reshape(left_rank, d, d) - T_last = np.transpose(rem, (1, 2, 0)).reshape(d, d, left_rank, 1) - tensors.append(T_last) + t_last = np.transpose(rem, (1, 2, 0)).reshape(d, d, left_rank, 1) + tensors.append(t_last) mpo = cls() mpo.tensors = tensors diff --git a/tests/core/data_structures/test_networks.py b/tests/core/data_structures/test_networks.py index ab738266..b25cafbe 100644 --- a/tests/core/data_structures/test_networks.py +++ b/tests/core/data_structures/test_networks.py @@ -64,7 +64,9 @@ def _embed_one_body(op: np.ndarray, length: int, i: int) -> np.ndarray: return out -def _embed_two_body(op1: np.ndarray, op2: np.ndarray, length: int, i: int) -> np.ndarray: +def _embed_two_body( + op1: np.ndarray, op2: np.ndarray, length: int, i: int +) -> np.ndarray: """Embed a nearest-neighbor two-site operator into a length-L qubit Hilbert space. Args: @@ -113,7 +115,9 @@ def _ising_dense(length: int, j_val: float, g: float) -> np.ndarray: return H -def _heisenberg_dense(length: int, jx: float, jy: float, jz: float, h: float) -> np.ndarray: +def _heisenberg_dense( + length: int, jx: float, jy: float, jz: float, h: float +) -> np.ndarray: """Construct the dense Heisenberg Hamiltonian for an open chain. The Hamiltonian is @@ -142,7 +146,9 @@ def _heisenberg_dense(length: int, jx: float, jy: float, jz: float, h: float) -> return H -def _bose_hubbard_dense(length: int, local_dim: int, omega: float, hopping_j: float, hubbard_u: float) -> np.ndarray: +def _bose_hubbard_dense( + length: int, local_dim: int, omega: float, hopping_j: float, hubbard_u: float +) -> np.ndarray: """Construct the exact dense Bose-Hubbard Hamiltonian for comparison. 
Returns: @@ -300,7 +306,9 @@ def test_bose_hubbard_correct_operator() -> None: assert mpo.length == length assert mpo.physical_dimension == local_dim assert len(mpo.tensors) == length - assert all(t.shape[2] <= 4 and t.shape[3] <= 4 for t in mpo.tensors), "Bond dimension should be 4" + assert all( + t.shape[2] <= 4 and t.shape[3] <= 4 for t in mpo.tensors + ), "Bond dimension should be 4" # Dense comparison H_dense = _bose_hubbard_dense(length, local_dim, omega, J, U) @@ -401,13 +409,34 @@ def test_from_matrix() -> None: Hmpo = MPO.from_matrix(H, d, 4) assert np.allclose(H, Hmpo.to_matrix()) - H = np.random.rand(d**length, d**length) + 1j * np.random.rand(d**length, d ** (length)) + H = np.random.rand(d**length, d**length) + 1j * np.random.rand( + d**length, d ** (length) + ) Hmpo = MPO.from_matrix(H, d, 1000000) assert np.allclose(H, Hmpo.to_matrix()) length = 6 - H = np.random.rand(d**length, d ** (length)) + 1j * np.random.rand(d**length, d ** (length)) + H = np.random.rand(d**length, d ** (length)) + 1j * np.random.rand( + d**length, d ** (length) + ) Hmpo = MPO.from_matrix(H, d, 728) + + # test boundary cases + # single value + mat = np.eye(1) + with pytest.raises(ValueError, match="Physical dimension d must be > 0"): + MPO.from_matrix(mat, d=0) + + # non square + mat = np.zeros((4, 2)) + with pytest.raises(ValueError, match="Matrix must be square"): + MPO.from_matrix(mat, d=2) + + # impossible matrix + mat = np.eye(6) # 6 is not a power of 2, 3, etc. + with pytest.raises(ValueError, match="not a power"): + MPO.from_matrix(mat, d=2) + assert np.max(np.abs(H - Hmpo.to_matrix())) < 1e-2 @@ -494,7 +523,9 @@ def test_check_if_identity() -> None: ############################################################################## -@pytest.mark.parametrize("state", ["zeros", "ones", "x+", "x-", "y+", "y-", "Neel", "wall", "basis"]) +@pytest.mark.parametrize( + "state", ["zeros", "ones", "x+", "x-", "y+", "y-", "Neel", "wall", "basis"] +) def test_mps_initialization(state: str) -> None: """Test that MPS initializes with the correct chain length, physical dimensions, and tensor shapes. 
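A minimal round-trip sketch of the factory exercised by test_from_matrix() above, assuming MPO is importable from mqt.yaqs.core.data_structures.networks (the module path of this patch) and using only the from_matrix()/to_matrix() calls shown in the tests; the two-qubit operator is illustrative:

import numpy as np

from mqt.yaqs.core.data_structures.networks import MPO

z = np.diag([1.0, -1.0])
x = np.array([[0.0, 1.0], [1.0, 0.0]])

# dense two-qubit operator Z (x) Z + X (x) I, i.e. d = 2 and n = 2 sites
h_dense = np.kron(z, z) + np.kron(x, np.eye(2))

# a bond dimension of 4 is enough for an exact factorization of a 4 x 4 matrix
h_mpo = MPO.from_matrix(h_dense, d=2, max_bond=4)
assert np.allclose(h_mpo.to_matrix(), h_dense)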
@@ -548,10 +579,18 @@ def test_mps_initialization(state: str) -> None: expected = np.array([1, -1j], dtype=complex) / np.sqrt(2) np.testing.assert_allclose(vec, expected) elif state == "Neel": - expected = np.array([1, 0], dtype=complex) if i % 2 else np.array([0, 1], dtype=complex) + expected = ( + np.array([1, 0], dtype=complex) + if i % 2 + else np.array([0, 1], dtype=complex) + ) np.testing.assert_allclose(vec, expected) elif state == "wall": - expected = np.array([1, 0], dtype=complex) if i < length // 2 else np.array([0, 1], dtype=complex) + expected = ( + np.array([1, 0], dtype=complex) + if i < length // 2 + else np.array([0, 1], dtype=complex) + ) np.testing.assert_allclose(vec, expected) elif state == "basis": bit = int(basis_string[i]) @@ -1003,7 +1042,9 @@ def test_pad_raises_on_shrink() -> None: mps = MPS(length=5, state="zeros") mps.pad_bond_dimension(4) # enlarge first - with pytest.raises(ValueError, match="Target bond dim must be at least current bond dim"): + with pytest.raises( + ValueError, match="Target bond dim must be at least current bond dim" + ): mps.pad_bond_dimension(2) # would shrink - must fail @@ -1229,7 +1270,9 @@ def test_evaluate_observables_diagnostics_and_meta_then_pvm_separately() -> None Observable(GateLibrary.entropy(), [1, 2]), Observable(GateLibrary.schmidt_spectrum(), [1, 2]), ] - sim_diag = AnalogSimParams(diagnostics_and_meta, elapsed_time=0.1, dt=0.1, show_progress=False) + sim_diag = AnalogSimParams( + diagnostics_and_meta, elapsed_time=0.1, dt=0.1, show_progress=False + ) results_diag = np.empty((len(diagnostics_and_meta), 2), dtype=object) mps.evaluate_observables(sim_diag, results_diag, column_index=0) @@ -1468,7 +1511,9 @@ def test_compress_one_sweep_raises_on_invalid_direction() -> None: np.zeros((2, 2, 1, 1), dtype=complex), ] with pytest.raises(ValueError, match=r"direction must be 'lr' or 'rl'\."): - mpo._compress_one_sweep(direction="xx", tol=1e-12, max_bond_dim=None) # noqa: SLF001 + mpo._compress_one_sweep( + direction="xx", tol=1e-12, max_bond_dim=None + ) # noqa: SLF001 def test_from_pauli_sum_empty_spec_is_identity_term() -> None: From 5cdac6bdf2ebe8ed08b33cce010c0936b58d1d8b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Feb 2026 14:46:54 +0000 Subject: [PATCH 19/25] =?UTF-8?q?=F0=9F=8E=A8=20pre-commit=20fixes?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/mqt/yaqs/core/data_structures/networks.py | 215 +++++------------- tests/core/data_structures/test_networks.py | 52 +---- 2 files changed, 66 insertions(+), 201 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index cf24f0f4..7d0df418 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -174,9 +174,7 @@ def __init__( vector[0] = rng.random() vector[1] = 1 - vector[0] elif state == "basis": - assert ( - basis_string is not None - ), "basis_string must be provided for 'basis' state initialization." + assert basis_string is not None, "basis_string must be provided for 'basis' state initialization." 
self.init_mps_from_basis(basis_string, self.physical_dimensions) break else: @@ -193,9 +191,7 @@ def __init__( if pad is not None: self.pad_bond_dimension(pad) - def init_mps_from_basis( - self, basis_string: str, physical_dimensions: list[int] - ) -> None: + def init_mps_from_basis(self, basis_string: str, physical_dimensions: list[int]) -> None: """Initialize a list of MPS tensors representing a product state from a basis string. Args: @@ -354,12 +350,8 @@ def get_schmidt_spectrum(self, sites: list[int]) -> NDArray[np.float64]: NDArray[np.float64]: The Schmidt spectrum (length 500), with unused entries filled with NaN. """ - assert ( - len(sites) == 2 - ), "Schmidt spectrum is defined on a bond (two adjacent sites)." - assert ( - sites[0] + 1 == sites[1] - ), "Schmidt spectrum only defined for nearest-neighbor cut." + assert len(sites) == 2, "Schmidt spectrum is defined on a bond (two adjacent sites)." + assert sites[0] + 1 == sites[1], "Schmidt spectrum only defined for nearest-neighbor cut." top_schmidt_vals = 500 i, j = sites a, b = self.tensors[i], self.tensors[j] @@ -415,9 +407,7 @@ def almost_equal(self, other: MPS) -> bool: return False return True - def shift_orthogonality_center_right( - self, current_orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def shift_orthogonality_center_right(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: """Shifts orthogonality center right. This function performs a QR decomposition to shift the known current center to the right and move @@ -451,9 +441,7 @@ def shift_orthogonality_center_right( self.tensors[current_orthogonality_center + 1], ) = (a_new, b_new) - def shift_orthogonality_center_left( - self, current_orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def shift_orthogonality_center_left(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: """Shifts orthogonality center left. This function flips the network, performs a right shift, then flips the network again. @@ -464,14 +452,10 @@ def shift_orthogonality_center_left( Default is QR. """ self.flip_network() - self.shift_orthogonality_center_right( - self.length - current_orthogonality_center - 1, decomposition - ) + self.shift_orthogonality_center_right(self.length - current_orthogonality_center - 1, decomposition) self.flip_network() - def set_canonical_form( - self, orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR") -> None: """Sets canonical form of MPS. Left and right normalizes an MPS around a selected site. @@ -482,9 +466,7 @@ def set_canonical_form( decomposition: Type of decomposition. Default QR. 
""" - def sweep_decomposition( - orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def sweep_decomposition(orthogonality_center: int, decomposition: str = "QR") -> None: for site, _ in enumerate(self.tensors): if site == orthogonality_center: break @@ -515,17 +497,13 @@ def normalize(self, form: str = "B", decomposition: str = "QR") -> None: if form == "B": self.flip_network() - self.set_canonical_form( - orthogonality_center=self.length - 1, decomposition=decomposition - ) + self.set_canonical_form(orthogonality_center=self.length - 1, decomposition=decomposition) self.shift_orthogonality_center_right(self.length - 1, decomposition) if form == "B": self.flip_network() - def truncate( - self, threshold: float = 1e-12, max_bond_dim: int | None = None - ) -> None: + def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> None: """In-place MPS truncation via repeated two-site SVDs.""" orth_center = self.check_canonical_form()[0] if self.length == 1: @@ -547,9 +525,7 @@ def truncate( self.flip_network() - def scalar_product( - self, other: MPS, sites: int | list[int] | None = None - ) -> np.complex128: + def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np.complex128: """Compute the scalar (inner) product between two Matrix Product States (MPS). The function contracts the corresponding tensors of two MPS objects. If no specific site is @@ -576,12 +552,8 @@ def scalar_product( result = None for idx in range(self.length): # contract at each site into a 4-leg tensor - theta = oe.contract( - "abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx] - ) - result = ( - theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) - ) + theta = oe.contract("abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx]) + result = theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) # squeeze down to scalar assert result is not None return np.complex128(np.squeeze(result)) @@ -613,9 +585,7 @@ def scalar_product( msg = f"Invalid `sites` argument: {sites!r}" raise ValueError(msg) - def local_expect( - self, operator: Observable, sites: int | list[int] - ) -> np.complex128: + def local_expect(self, operator: Observable, sites: int | list[int]) -> np.complex128: """Compute the local expectation value of an operator on an MPS. The function applies the given operator to the tensor at the specified site of a deep copy of the @@ -641,17 +611,11 @@ def local_expect( i = sites if isinstance(operator.sites, list): - assert ( - operator.sites[0] == i - ), f"Operator sites mismatch {operator.sites[0]}, {i}" + assert operator.sites[0] == i, f"Operator sites mismatch {operator.sites[0]}, {i}" elif isinstance(operator.sites, int): - assert ( - operator.sites == i - ), f"Operator sites mismatch {operator.sites}, {i}" + assert operator.sites == i, f"Operator sites mismatch {operator.sites}, {i}" - assert ( - i is not None - ), f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" + assert i is not None, f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" a = temp_state.tensors[i] temp_state.tensors[i] = oe.contract("ab, bcd->acd", operator.gate.matrix, a) @@ -662,12 +626,10 @@ def local_expect( assert operator.sites[0] == i, "Observable sites mismatch" assert operator.sites[1] == j, "Observable sites mismatch" - assert ( - operator.sites[0] < operator.sites[1] - ), "Observable sites must be in ascending order." 
- assert ( - operator.sites[1] - operator.sites[0] == 1 - ), "Only nearest-neighbor observables are currently implemented." + assert operator.sites[0] < operator.sites[1], "Observable sites must be in ascending order." + assert operator.sites[1] - operator.sites[0] == 1, ( + "Only nearest-neighbor observables are currently implemented." + ) a = temp_state.tensors[i] b = temp_state.tensors[j] d_i, left, _ = a.shape @@ -679,9 +641,7 @@ def local_expect( theta = theta.reshape(left, d_i * d_j, right) # (l, d_i*d_j, r) # 2) apply operator on the combined phys index - theta = oe.contract( - "ab, cbd->cad", operator.gate.matrix, theta - ) # (l, d_i*d_j, r) + theta = oe.contract("ab, cbd->cad", operator.gate.matrix, theta) # (l, d_i*d_j, r) theta = theta.reshape(left, d_i, d_j, right) # back to (l, d_i, d_j, r) # 3) split via SVD @@ -694,9 +654,7 @@ def local_expect( u_tensor = u_mat.reshape(left, d_i, chi_new) # (l, d_i, r_new) a_new = u_tensor.transpose(1, 0, 2) # → (d_i, l, r_new) - v_tensor = (np.diag(s_vec) @ v_mat).reshape( - chi_new, d_j, right - ) # (l_new, d_j, r) + v_tensor = (np.diag(s_vec) @ v_mat).reshape(chi_new, d_j, right) # (l_new, d_j, r) b_new = v_tensor.transpose(1, 0, 2) # → (d_j, l_new, r) temp_state.tensors[i] = a_new @@ -732,44 +690,24 @@ def evaluate_observables( elif observable.gate.name == "total_bond": results[obs_index, column_index] = self.get_total_bond() elif observable.gate.name in {"entropy", "schmidt_spectrum"}: - assert isinstance( - observable.sites, list - ), "Given metric requires a list of sites" - assert ( - len(observable.sites) == 2 - ), "Given metric requires 2 sites to act on." + assert isinstance(observable.sites, list), "Given metric requires a list of sites" + assert len(observable.sites) == 2, "Given metric requires 2 sites to act on." max_site = max(observable.sites) min_site = min(observable.sites) - assert ( - max_site - min_site == 1 - ), "Entropy and Schmidt cuts must be nearest neighbor." + assert max_site - min_site == 1, "Entropy and Schmidt cuts must be nearest neighbor." for s in observable.sites: - assert s in range( - self.length - ), f"Observable acting on non-existing site: {s}" + assert s in range(self.length), f"Observable acting on non-existing site: {s}" if observable.gate.name == "entropy": - results[obs_index, column_index] = self.get_entropy( - observable.sites - ) + results[obs_index, column_index] = self.get_entropy(observable.sites) elif observable.gate.name == "schmidt_spectrum": - results[obs_index, column_index] = self.get_schmidt_spectrum( - observable.sites - ) + results[obs_index, column_index] = self.get_schmidt_spectrum(observable.sites) elif observable.gate.name == "pvm": - assert hasattr( - observable.gate, "bitstring" - ), "Gate does not have attribute bitstring." - results[obs_index, column_index] = self.project_onto_bitstring( - observable.gate.bitstring - ) + assert hasattr(observable.gate, "bitstring"), "Gate does not have attribute bitstring." 
+ results[obs_index, column_index] = self.project_onto_bitstring(observable.gate.bitstring) else: - idx = ( - observable.sites[0] - if isinstance(observable.sites, list) - else observable.sites - ) + idx = observable.sites[0] if isinstance(observable.sites, list) else observable.sites if idx > last_site: for site in range(last_site, idx): temp_state.shift_orthogonality_center_right(site) @@ -794,24 +732,16 @@ def expect(self, observable: Observable) -> np.float64: elif isinstance(observable.sites, list): sites_list = observable.sites - assert ( - sites_list is not None - ), f"Invalid type in expect {type(observable.sites).__name__}" + assert sites_list is not None, f"Invalid type in expect {type(observable.sites).__name__}" - assert ( - len(sites_list) < 3 - ), "Only one- and two-site observables are currently implemented." + assert len(sites_list) < 3, "Only one- and two-site observables are currently implemented." for s in sites_list: - assert s in range( - self.length - ), f"Observable acting on non-existing site: {s}" + assert s in range(self.length), f"Observable acting on non-existing site: {s}" exp = self.local_expect(observable, sites_list) - assert ( - exp.imag < 1e-13 - ), f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." + assert exp.imag < 1e-13, f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." return exp.real def measure_single_shot(self) -> int: @@ -828,9 +758,7 @@ def measure_single_shot(self) -> int: temp_state = copy.deepcopy(self) bitstring = [] for site, tensor in enumerate(temp_state.tensors): - reduced_density_matrix = oe.contract( - "abc, dbc->ad", tensor, np.conj(tensor) - ) + reduced_density_matrix = oe.contract("abc, dbc->ad", tensor, np.conj(tensor)) probabilities = np.diag(reduced_density_matrix).real rng = np.random.default_rng() chosen_index = rng.choice(len(probabilities), p=probabilities) @@ -844,9 +772,7 @@ def measure_single_shot(self) -> int: temp_state.tensors[site + 1] = ( # noqa: B909 1 / np.sqrt(probabilities[chosen_index]) - * oe.contract( - "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] - ) + * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) ) return sum(c << i for i, c in enumerate(bitstring)) @@ -871,14 +797,10 @@ def measure_shots(self, shots: int) -> dict[int, int]: if shots > 1: max_workers = max(1, multiprocessing.cpu_count() - 1) with ( - concurrent.futures.ProcessPoolExecutor( - max_workers=max_workers - ) as executor, + concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor, tqdm(total=shots, desc="Measuring shots", ncols=80) as pbar, ): - futures = [ - executor.submit(self.measure_single_shot) for _ in range(shots) - ] + futures = [executor.submit(self.measure_single_shot) for _ in range(shots)] for future in concurrent.futures.as_completed(futures): result = future.result() results[result] = results.get(result, 0) + 1 @@ -902,9 +824,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: Returns: float: Probability of obtaining the given bitstring under projective measurement. 
""" - assert ( - len(bitstring) == self.length - ), "Bitstring length must match number of sites" + assert len(bitstring) == self.length, "Bitstring length must match number of sites" temp_state = copy.deepcopy(self) total_norm = 1.0 @@ -912,9 +832,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: state_index = int(char) tensor = temp_state.tensors[site] local_dim = self.physical_dimensions[site] - assert ( - 0 <= state_index < local_dim - ), f"Invalid state index {state_index} at site {site}" + assert 0 <= state_index < local_dim, f"Invalid state index {state_index} at site {site}" selected_state = np.zeros(local_dim) selected_state[state_index] = 1 @@ -931,11 +849,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: # Normalize and propagate if site != self.length - 1: temp_state.tensors[site + 1] = ( - 1 - / norm - * oe.contract( - "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] - ) + 1 / norm * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) ) return np.complex128(total_norm**2) @@ -1452,20 +1366,13 @@ def bose_hubbard( tensor[3, 3] = id_boson # build the full tensor list - tensors = [ - np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) - for _ in range(length) - ] + tensors = [np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) for _ in range(length)] # Left boundary: take only row 0 - tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype( - np.complex128 - ) + tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype(np.complex128) # Right boundary: take only col 3 - tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype( - np.complex128 - ) + tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype(np.complex128) mpo = cls() mpo.tensors = tensors @@ -1523,9 +1430,7 @@ def finite_state_machine( self.length = len(self.tensors) self.physical_dimension = self.tensors[0].shape[0] - def custom( - self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True - ) -> None: + def custom(self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True) -> None: """Custom MPO from tensors. Initialize the custom MPO (Matrix Product Operator) with the given tensors. @@ -1593,9 +1498,7 @@ def from_pauli_sum( self.physical_dimension = physical_dimension if not terms: - self.tensors = [ - np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length) - ] + self.tensors = [np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length)] return # 1. Parse terms into dense lists of operator names. @@ -1629,9 +1532,7 @@ def from_pauli_sum( term_trajectories[t_idx][length] = 0 # bond_state_maps[i] stores the mapping: (Op_str, Next_State_ID) -> Current_State_ID - bond_state_maps: list[dict[tuple[str, int], int]] = [ - {} for _ in range(length + 1) - ] + bond_state_maps: list[dict[tuple[str, int], int]] = [{} for _ in range(length + 1)] # Sweep Right-to-Left (sites L-1 down to 1) to build the FSM transitions. # We stop at bond 1. Bond 0 is always the single "Start" state. @@ -1697,9 +1598,7 @@ def from_pauli_sum( # The FSM construction is optimal for one-sided (suffix) uniqueness. # A standard two-sweep compression ("lr_rl") puts the MPO in canonical form # and removes any remaining redundancies (e.g., common prefixes). 
- self.compress( - tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl" - ) + self.compress(tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl") assert self.check_if_valid_mpo(), "MPO initialized wrong" def compress( @@ -1748,13 +1647,9 @@ def compress( for _ in range(n_sweeps): for direction in schedule: - self._compress_one_sweep( - direction=direction, tol=tol, max_bond_dim=max_bond_dim - ) + self._compress_one_sweep(direction=direction, tol=tol, max_bond_dim=max_bond_dim) - def _compress_one_sweep( - self, *, direction: str, tol: float, max_bond_dim: int | None - ) -> None: + def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | None) -> None: """Run one in-place MPO SVD compression sweep in the given direction. Args: @@ -1804,9 +1699,7 @@ def _compress_one_sweep( vh = vh[:keep, :] # Left tensor: (bond_dim_left, d, d, keep) -> (d, d, bond_dim_left, keep) - left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose( - 1, 2, 0, 3 - ) + left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose(1, 2, 0, 3) # Right tensor: (keep, d, d, bond_dim_right) -> (d, d, keep, bond_dim_right) svh = (s[:, None] * vh).reshape(keep, phys_dim, phys_dim, bond_dim_right) diff --git a/tests/core/data_structures/test_networks.py b/tests/core/data_structures/test_networks.py index b25cafbe..48c5c3e4 100644 --- a/tests/core/data_structures/test_networks.py +++ b/tests/core/data_structures/test_networks.py @@ -64,9 +64,7 @@ def _embed_one_body(op: np.ndarray, length: int, i: int) -> np.ndarray: return out -def _embed_two_body( - op1: np.ndarray, op2: np.ndarray, length: int, i: int -) -> np.ndarray: +def _embed_two_body(op1: np.ndarray, op2: np.ndarray, length: int, i: int) -> np.ndarray: """Embed a nearest-neighbor two-site operator into a length-L qubit Hilbert space. Args: @@ -115,9 +113,7 @@ def _ising_dense(length: int, j_val: float, g: float) -> np.ndarray: return H -def _heisenberg_dense( - length: int, jx: float, jy: float, jz: float, h: float -) -> np.ndarray: +def _heisenberg_dense(length: int, jx: float, jy: float, jz: float, h: float) -> np.ndarray: """Construct the dense Heisenberg Hamiltonian for an open chain. The Hamiltonian is @@ -146,9 +142,7 @@ def _heisenberg_dense( return H -def _bose_hubbard_dense( - length: int, local_dim: int, omega: float, hopping_j: float, hubbard_u: float -) -> np.ndarray: +def _bose_hubbard_dense(length: int, local_dim: int, omega: float, hopping_j: float, hubbard_u: float) -> np.ndarray: """Construct the exact dense Bose-Hubbard Hamiltonian for comparison. 
Returns: @@ -306,9 +300,7 @@ def test_bose_hubbard_correct_operator() -> None: assert mpo.length == length assert mpo.physical_dimension == local_dim assert len(mpo.tensors) == length - assert all( - t.shape[2] <= 4 and t.shape[3] <= 4 for t in mpo.tensors - ), "Bond dimension should be 4" + assert all(t.shape[2] <= 4 and t.shape[3] <= 4 for t in mpo.tensors), "Bond dimension should be 4" # Dense comparison H_dense = _bose_hubbard_dense(length, local_dim, omega, J, U) @@ -409,16 +401,12 @@ def test_from_matrix() -> None: Hmpo = MPO.from_matrix(H, d, 4) assert np.allclose(H, Hmpo.to_matrix()) - H = np.random.rand(d**length, d**length) + 1j * np.random.rand( - d**length, d ** (length) - ) + H = np.random.rand(d**length, d**length) + 1j * np.random.rand(d**length, d ** (length)) Hmpo = MPO.from_matrix(H, d, 1000000) assert np.allclose(H, Hmpo.to_matrix()) length = 6 - H = np.random.rand(d**length, d ** (length)) + 1j * np.random.rand( - d**length, d ** (length) - ) + H = np.random.rand(d**length, d ** (length)) + 1j * np.random.rand(d**length, d ** (length)) Hmpo = MPO.from_matrix(H, d, 728) # test boundary cases @@ -523,9 +511,7 @@ def test_check_if_identity() -> None: ############################################################################## -@pytest.mark.parametrize( - "state", ["zeros", "ones", "x+", "x-", "y+", "y-", "Neel", "wall", "basis"] -) +@pytest.mark.parametrize("state", ["zeros", "ones", "x+", "x-", "y+", "y-", "Neel", "wall", "basis"]) def test_mps_initialization(state: str) -> None: """Test that MPS initializes with the correct chain length, physical dimensions, and tensor shapes. @@ -579,18 +565,10 @@ def test_mps_initialization(state: str) -> None: expected = np.array([1, -1j], dtype=complex) / np.sqrt(2) np.testing.assert_allclose(vec, expected) elif state == "Neel": - expected = ( - np.array([1, 0], dtype=complex) - if i % 2 - else np.array([0, 1], dtype=complex) - ) + expected = np.array([1, 0], dtype=complex) if i % 2 else np.array([0, 1], dtype=complex) np.testing.assert_allclose(vec, expected) elif state == "wall": - expected = ( - np.array([1, 0], dtype=complex) - if i < length // 2 - else np.array([0, 1], dtype=complex) - ) + expected = np.array([1, 0], dtype=complex) if i < length // 2 else np.array([0, 1], dtype=complex) np.testing.assert_allclose(vec, expected) elif state == "basis": bit = int(basis_string[i]) @@ -1042,9 +1020,7 @@ def test_pad_raises_on_shrink() -> None: mps = MPS(length=5, state="zeros") mps.pad_bond_dimension(4) # enlarge first - with pytest.raises( - ValueError, match="Target bond dim must be at least current bond dim" - ): + with pytest.raises(ValueError, match="Target bond dim must be at least current bond dim"): mps.pad_bond_dimension(2) # would shrink - must fail @@ -1270,9 +1246,7 @@ def test_evaluate_observables_diagnostics_and_meta_then_pvm_separately() -> None Observable(GateLibrary.entropy(), [1, 2]), Observable(GateLibrary.schmidt_spectrum(), [1, 2]), ] - sim_diag = AnalogSimParams( - diagnostics_and_meta, elapsed_time=0.1, dt=0.1, show_progress=False - ) + sim_diag = AnalogSimParams(diagnostics_and_meta, elapsed_time=0.1, dt=0.1, show_progress=False) results_diag = np.empty((len(diagnostics_and_meta), 2), dtype=object) mps.evaluate_observables(sim_diag, results_diag, column_index=0) @@ -1511,9 +1485,7 @@ def test_compress_one_sweep_raises_on_invalid_direction() -> None: np.zeros((2, 2, 1, 1), dtype=complex), ] with pytest.raises(ValueError, match=r"direction must be 'lr' or 'rl'\."): - mpo._compress_one_sweep( - direction="xx", 
tol=1e-12, max_bond_dim=None - ) # noqa: SLF001 + mpo._compress_one_sweep(direction="xx", tol=1e-12, max_bond_dim=None) # noqa: SLF001 def test_from_pauli_sum_empty_spec_is_identity_term() -> None: From 06d0e3c448851b09fdac72cf6d92d6ea7ba986d2 Mon Sep 17 00:00:00 2001 From: Lukas Date: Mon, 16 Feb 2026 15:50:21 +0100 Subject: [PATCH 20/25] changed to rng in test --- tests/core/data_structures/test_networks.py | 65 +++++++++++++++------ 1 file changed, 46 insertions(+), 19 deletions(-) diff --git a/tests/core/data_structures/test_networks.py b/tests/core/data_structures/test_networks.py index b25cafbe..e9fd17ef 100644 --- a/tests/core/data_structures/test_networks.py +++ b/tests/core/data_structures/test_networks.py @@ -397,47 +397,74 @@ def test_custom() -> None: def test_from_matrix() -> None: """Test that from_matrix() constructs a correct MPO. - This test constructs a dense Bose-Hubbard Hamiltonian and creates an MPO via from_matrix(). This - is converted back via to_matrix and is compared to the original. The same is done for a random - matrix at maximal bond dimension and close-to maximal bond dimension. + This test constructs a dense Bose-Hubbard Hamiltonian and creates an MPO via from_matrix(). + It checks: + - reconstruction correctness for Bose-Hubbard + - random matrices at very large bond dimension + - random matrices at moderately truncated bond dimension + - all validation error branches (Codecov) """ + import numpy as np + import pytest + + rng = np.random.default_rng() + + # ------------------------------------------------- + # 1) Bose–Hubbard Hamiltonian (exact up to bond dim 4) + # ------------------------------------------------- + length = 5 - # local dimension - d = 3 + d = 3 # local dimension H = _bose_hubbard_dense(length, d, 0.9, 0.6, 0.2) - # exact down to bond dimension 4 + Hmpo = MPO.from_matrix(H, d, 4) assert np.allclose(H, Hmpo.to_matrix()) - H = np.random.rand(d**length, d**length) + 1j * np.random.rand( - d**length, d ** (length) - ) - Hmpo = MPO.from_matrix(H, d, 1000000) + # ------------------------------------------------- + # 2) Random matrix, max bond dimension + # ------------------------------------------------- + + H = rng.random((d**length, d**length)) + 1j * rng.random((d**length, d**length)) + Hmpo = MPO.from_matrix(H, d, 1_000_000) assert np.allclose(H, Hmpo.to_matrix()) + # ------------------------------------------------- + # 3) Random matrix, moderately truncated + # ------------------------------------------------- + length = 6 - H = np.random.rand(d**length, d ** (length)) + 1j * np.random.rand( - d**length, d ** (length) - ) + H = rng.random((d**length, d**length)) + 1j * rng.random((d**length, d**length)) Hmpo = MPO.from_matrix(H, d, 728) + assert np.max(np.abs(H - Hmpo.to_matrix())) < 1e-2 + + # ------------------------------------------------- + # 4) Boundary cases: hit ALL validation branches + # ------------------------------------------------- - # test boundary cases - # single value + # d <= 0 mat = np.eye(1) with pytest.raises(ValueError, match="Physical dimension d must be > 0"): MPO.from_matrix(mat, d=0) - # non square + # non-square matrix mat = np.zeros((4, 2)) with pytest.raises(ValueError, match="Matrix must be square"): MPO.from_matrix(mat, d=2) - # impossible matrix - mat = np.eye(6) # 6 is not a power of 2, 3, etc. 
+ # d == 1 but matrix not 1x1 + mat = np.eye(4) + with pytest.raises(ValueError, match="1x1"): + MPO.from_matrix(mat, d=1) + + # matrix dimension not a power of d + mat = np.eye(6) with pytest.raises(ValueError, match="not a power"): MPO.from_matrix(mat, d=2) - assert np.max(np.abs(H - Hmpo.to_matrix())) < 1e-2 + # inferred n < 1 (log(1)/log(100) = 0) + mat = np.eye(1) + with pytest.raises(ValueError, match="invalid"): + MPO.from_matrix(mat, d=100) def test_to_mps() -> None: From 61d591d50ef6b6c10eeb5650b00940b29ed8cc65 Mon Sep 17 00:00:00 2001 From: Lukas Date: Mon, 16 Feb 2026 16:00:48 +0100 Subject: [PATCH 21/25] updated version of from_matrix --- src/mqt/yaqs/core/data_structures/networks.py | 32 ++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 7d0df418..266b4c68 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -1783,7 +1783,37 @@ def from_matrix( max_bond: int | None = None, cutoff: float = 0.0, ) -> MPO: - """Factorize a dense matrix into an MPO with uniform local dimension ``d``.""" + """Factorize a dense matrix into an MPO with uniform local dimension ``d``. + + Each site has local shape ``(d, d)``. + The number of sites ``n`` is inferred from the relation: + + mat.shape = (d**n, d**n) + + Args: + mat (np.ndarray): + Square matrix of shape ``(d**n, d**n)``. + d (int): + Physical dimension per site. Must satisfy ``d > 0``. + max_bond (int | None): + Maximum allowed bond dimension (before truncation). + cutoff (float): + Singular values ``<= cutoff`` are discarded. By default cutoff=0: all non-zero singular + values are included. + + Returns: + MPO: + An MPO with ``n`` sites, uniform physical dimension ``d`` per site, + and bond dimensions determined by SVD truncation. + + Raises: + ValueError: + If ``d <= 0``; + If ``d == 1`` but the matrix is not ``1 x 1``; + If the matrix is not square; + If ``rows`` is not a power of ``d``; + If the inferred number of sites ``n < 1``. + """ if d <= 0: msg = f"Physical dimension d must be > 0, got d={d}." raise ValueError(msg) From f23fc794c9c6a7208568b5878938ba9f9f847dd6 Mon Sep 17 00:00:00 2001 From: Lukas Date: Mon, 16 Feb 2026 16:02:56 +0100 Subject: [PATCH 22/25] test comments format update --- tests/core/data_structures/test_networks.py | 63 +++++++++++---------- 1 file changed, 34 insertions(+), 29 deletions(-) diff --git a/tests/core/data_structures/test_networks.py b/tests/core/data_structures/test_networks.py index a2592d6b..f35e5237 100644 --- a/tests/core/data_structures/test_networks.py +++ b/tests/core/data_structures/test_networks.py @@ -64,7 +64,9 @@ def _embed_one_body(op: np.ndarray, length: int, i: int) -> np.ndarray: return out -def _embed_two_body(op1: np.ndarray, op2: np.ndarray, length: int, i: int) -> np.ndarray: +def _embed_two_body( + op1: np.ndarray, op2: np.ndarray, length: int, i: int +) -> np.ndarray: """Embed a nearest-neighbor two-site operator into a length-L qubit Hilbert space. Args: @@ -113,7 +115,9 @@ def _ising_dense(length: int, j_val: float, g: float) -> np.ndarray: return H -def _heisenberg_dense(length: int, jx: float, jy: float, jz: float, h: float) -> np.ndarray: +def _heisenberg_dense( + length: int, jx: float, jy: float, jz: float, h: float +) -> np.ndarray: """Construct the dense Heisenberg Hamiltonian for an open chain. 
The Hamiltonian is @@ -142,7 +146,9 @@ def _heisenberg_dense(length: int, jx: float, jy: float, jz: float, h: float) -> return H -def _bose_hubbard_dense(length: int, local_dim: int, omega: float, hopping_j: float, hubbard_u: float) -> np.ndarray: +def _bose_hubbard_dense( + length: int, local_dim: int, omega: float, hopping_j: float, hubbard_u: float +) -> np.ndarray: """Construct the exact dense Bose-Hubbard Hamiltonian for comparison. Returns: @@ -300,7 +306,9 @@ def test_bose_hubbard_correct_operator() -> None: assert mpo.length == length assert mpo.physical_dimension == local_dim assert len(mpo.tensors) == length - assert all(t.shape[2] <= 4 and t.shape[3] <= 4 for t in mpo.tensors), "Bond dimension should be 4" + assert all( + t.shape[2] <= 4 and t.shape[3] <= 4 for t in mpo.tensors + ), "Bond dimension should be 4" # Dense comparison H_dense = _bose_hubbard_dense(length, local_dim, omega, J, U) @@ -396,15 +404,9 @@ def test_from_matrix() -> None: - random matrices at moderately truncated bond dimension - all validation error branches (Codecov) """ - import numpy as np - import pytest rng = np.random.default_rng() - # ------------------------------------------------- - # 1) Bose–Hubbard Hamiltonian (exact up to bond dim 4) - # ------------------------------------------------- - length = 5 d = 3 # local dimension H = _bose_hubbard_dense(length, d, 0.9, 0.6, 0.2) @@ -412,28 +414,15 @@ def test_from_matrix() -> None: Hmpo = MPO.from_matrix(H, d, 4) assert np.allclose(H, Hmpo.to_matrix()) - # ------------------------------------------------- - # 2) Random matrix, max bond dimension - # ------------------------------------------------- - H = rng.random((d**length, d**length)) + 1j * rng.random((d**length, d**length)) Hmpo = MPO.from_matrix(H, d, 1_000_000) assert np.allclose(H, Hmpo.to_matrix()) - # ------------------------------------------------- - # 3) Random matrix, moderately truncated - # ------------------------------------------------- - length = 6 H = rng.random((d**length, d**length)) + 1j * rng.random((d**length, d**length)) Hmpo = MPO.from_matrix(H, d, 728) assert np.max(np.abs(H - Hmpo.to_matrix())) < 1e-2 - # ------------------------------------------------- - # 4) Boundary cases: hit ALL validation branches - # ------------------------------------------------- - - # d <= 0 mat = np.eye(1) with pytest.raises(ValueError, match="Physical dimension d must be > 0"): MPO.from_matrix(mat, d=0) @@ -542,7 +531,9 @@ def test_check_if_identity() -> None: ############################################################################## -@pytest.mark.parametrize("state", ["zeros", "ones", "x+", "x-", "y+", "y-", "Neel", "wall", "basis"]) +@pytest.mark.parametrize( + "state", ["zeros", "ones", "x+", "x-", "y+", "y-", "Neel", "wall", "basis"] +) def test_mps_initialization(state: str) -> None: """Test that MPS initializes with the correct chain length, physical dimensions, and tensor shapes. 
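The boundary cases above pin down how from_matrix() infers the chain length from mat.shape == (d**n, d**n). A standalone sketch of that inference follows; the helper name and exact messages are illustrative, only the "invalid" and "not a power" fragments are taken from the pytest.raises matches:

import numpy as np


def infer_num_sites(rows: int, d: int) -> int:
    # rows must equal d**n for an integer n >= 1 (d > 1 assumed; d == 1 is special-cased upstream)
    n = round(np.log(rows) / np.log(d))
    if n < 1:
        msg = f"Inferred number of sites n={n} is invalid; need n >= 1."
        raise ValueError(msg)
    if d**n != rows:
        msg = f"Matrix dimension {rows} is not a power of d={d}."
        raise ValueError(msg)
    return n


assert infer_num_sites(3**5, 3) == 5  # a 243 x 243 matrix with d = 3 has 5 sites
# infer_num_sites(6, 2) raises: 6 is not a power of 2
# infer_num_sites(1, 100) raises: inferred n = 0 is invalid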
@@ -596,10 +587,18 @@ def test_mps_initialization(state: str) -> None: expected = np.array([1, -1j], dtype=complex) / np.sqrt(2) np.testing.assert_allclose(vec, expected) elif state == "Neel": - expected = np.array([1, 0], dtype=complex) if i % 2 else np.array([0, 1], dtype=complex) + expected = ( + np.array([1, 0], dtype=complex) + if i % 2 + else np.array([0, 1], dtype=complex) + ) np.testing.assert_allclose(vec, expected) elif state == "wall": - expected = np.array([1, 0], dtype=complex) if i < length // 2 else np.array([0, 1], dtype=complex) + expected = ( + np.array([1, 0], dtype=complex) + if i < length // 2 + else np.array([0, 1], dtype=complex) + ) np.testing.assert_allclose(vec, expected) elif state == "basis": bit = int(basis_string[i]) @@ -1051,7 +1050,9 @@ def test_pad_raises_on_shrink() -> None: mps = MPS(length=5, state="zeros") mps.pad_bond_dimension(4) # enlarge first - with pytest.raises(ValueError, match="Target bond dim must be at least current bond dim"): + with pytest.raises( + ValueError, match="Target bond dim must be at least current bond dim" + ): mps.pad_bond_dimension(2) # would shrink - must fail @@ -1277,7 +1278,9 @@ def test_evaluate_observables_diagnostics_and_meta_then_pvm_separately() -> None Observable(GateLibrary.entropy(), [1, 2]), Observable(GateLibrary.schmidt_spectrum(), [1, 2]), ] - sim_diag = AnalogSimParams(diagnostics_and_meta, elapsed_time=0.1, dt=0.1, show_progress=False) + sim_diag = AnalogSimParams( + diagnostics_and_meta, elapsed_time=0.1, dt=0.1, show_progress=False + ) results_diag = np.empty((len(diagnostics_and_meta), 2), dtype=object) mps.evaluate_observables(sim_diag, results_diag, column_index=0) @@ -1516,7 +1519,9 @@ def test_compress_one_sweep_raises_on_invalid_direction() -> None: np.zeros((2, 2, 1, 1), dtype=complex), ] with pytest.raises(ValueError, match=r"direction must be 'lr' or 'rl'\."): - mpo._compress_one_sweep(direction="xx", tol=1e-12, max_bond_dim=None) # noqa: SLF001 + mpo._compress_one_sweep( + direction="xx", tol=1e-12, max_bond_dim=None + ) # noqa: SLF001 def test_from_pauli_sum_empty_spec_is_identity_term() -> None: From 99736c8a9610437e2b4a0bdcfafb50bfb84204fc Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Feb 2026 15:03:33 +0000 Subject: [PATCH 23/25] =?UTF-8?q?=F0=9F=8E=A8=20pre-commit=20fixes?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/core/data_structures/test_networks.py | 45 +++++---------------- 1 file changed, 10 insertions(+), 35 deletions(-) diff --git a/tests/core/data_structures/test_networks.py b/tests/core/data_structures/test_networks.py index f35e5237..c979856a 100644 --- a/tests/core/data_structures/test_networks.py +++ b/tests/core/data_structures/test_networks.py @@ -64,9 +64,7 @@ def _embed_one_body(op: np.ndarray, length: int, i: int) -> np.ndarray: return out -def _embed_two_body( - op1: np.ndarray, op2: np.ndarray, length: int, i: int -) -> np.ndarray: +def _embed_two_body(op1: np.ndarray, op2: np.ndarray, length: int, i: int) -> np.ndarray: """Embed a nearest-neighbor two-site operator into a length-L qubit Hilbert space. 
Args: @@ -115,9 +113,7 @@ def _ising_dense(length: int, j_val: float, g: float) -> np.ndarray: return H -def _heisenberg_dense( - length: int, jx: float, jy: float, jz: float, h: float -) -> np.ndarray: +def _heisenberg_dense(length: int, jx: float, jy: float, jz: float, h: float) -> np.ndarray: """Construct the dense Heisenberg Hamiltonian for an open chain. The Hamiltonian is @@ -146,9 +142,7 @@ def _heisenberg_dense( return H -def _bose_hubbard_dense( - length: int, local_dim: int, omega: float, hopping_j: float, hubbard_u: float -) -> np.ndarray: +def _bose_hubbard_dense(length: int, local_dim: int, omega: float, hopping_j: float, hubbard_u: float) -> np.ndarray: """Construct the exact dense Bose-Hubbard Hamiltonian for comparison. Returns: @@ -306,9 +300,7 @@ def test_bose_hubbard_correct_operator() -> None: assert mpo.length == length assert mpo.physical_dimension == local_dim assert len(mpo.tensors) == length - assert all( - t.shape[2] <= 4 and t.shape[3] <= 4 for t in mpo.tensors - ), "Bond dimension should be 4" + assert all(t.shape[2] <= 4 and t.shape[3] <= 4 for t in mpo.tensors), "Bond dimension should be 4" # Dense comparison H_dense = _bose_hubbard_dense(length, local_dim, omega, J, U) @@ -404,7 +396,6 @@ def test_from_matrix() -> None: - random matrices at moderately truncated bond dimension - all validation error branches (Codecov) """ - rng = np.random.default_rng() length = 5 @@ -531,9 +522,7 @@ def test_check_if_identity() -> None: ############################################################################## -@pytest.mark.parametrize( - "state", ["zeros", "ones", "x+", "x-", "y+", "y-", "Neel", "wall", "basis"] -) +@pytest.mark.parametrize("state", ["zeros", "ones", "x+", "x-", "y+", "y-", "Neel", "wall", "basis"]) def test_mps_initialization(state: str) -> None: """Test that MPS initializes with the correct chain length, physical dimensions, and tensor shapes. 
@@ -587,18 +576,10 @@ def test_mps_initialization(state: str) -> None: expected = np.array([1, -1j], dtype=complex) / np.sqrt(2) np.testing.assert_allclose(vec, expected) elif state == "Neel": - expected = ( - np.array([1, 0], dtype=complex) - if i % 2 - else np.array([0, 1], dtype=complex) - ) + expected = np.array([1, 0], dtype=complex) if i % 2 else np.array([0, 1], dtype=complex) np.testing.assert_allclose(vec, expected) elif state == "wall": - expected = ( - np.array([1, 0], dtype=complex) - if i < length // 2 - else np.array([0, 1], dtype=complex) - ) + expected = np.array([1, 0], dtype=complex) if i < length // 2 else np.array([0, 1], dtype=complex) np.testing.assert_allclose(vec, expected) elif state == "basis": bit = int(basis_string[i]) @@ -1050,9 +1031,7 @@ def test_pad_raises_on_shrink() -> None: mps = MPS(length=5, state="zeros") mps.pad_bond_dimension(4) # enlarge first - with pytest.raises( - ValueError, match="Target bond dim must be at least current bond dim" - ): + with pytest.raises(ValueError, match="Target bond dim must be at least current bond dim"): mps.pad_bond_dimension(2) # would shrink - must fail @@ -1278,9 +1257,7 @@ def test_evaluate_observables_diagnostics_and_meta_then_pvm_separately() -> None Observable(GateLibrary.entropy(), [1, 2]), Observable(GateLibrary.schmidt_spectrum(), [1, 2]), ] - sim_diag = AnalogSimParams( - diagnostics_and_meta, elapsed_time=0.1, dt=0.1, show_progress=False - ) + sim_diag = AnalogSimParams(diagnostics_and_meta, elapsed_time=0.1, dt=0.1, show_progress=False) results_diag = np.empty((len(diagnostics_and_meta), 2), dtype=object) mps.evaluate_observables(sim_diag, results_diag, column_index=0) @@ -1519,9 +1496,7 @@ def test_compress_one_sweep_raises_on_invalid_direction() -> None: np.zeros((2, 2, 1, 1), dtype=complex), ] with pytest.raises(ValueError, match=r"direction must be 'lr' or 'rl'\."): - mpo._compress_one_sweep( - direction="xx", tol=1e-12, max_bond_dim=None - ) # noqa: SLF001 + mpo._compress_one_sweep(direction="xx", tol=1e-12, max_bond_dim=None) # noqa: SLF001 def test_from_pauli_sum_empty_spec_is_identity_term() -> None: From fe1d3307443303331ef46b3236932b63c4184c5b Mon Sep 17 00:00:00 2001 From: Lukas Date: Tue, 17 Feb 2026 11:09:43 +0100 Subject: [PATCH 24/25] change default cutoff in frm_matrix to 1e-12 --- src/mqt/yaqs/core/data_structures/networks.py | 221 +++++++++++++----- 1 file changed, 164 insertions(+), 57 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 266b4c68..0938616a 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -174,7 +174,9 @@ def __init__( vector[0] = rng.random() vector[1] = 1 - vector[0] elif state == "basis": - assert basis_string is not None, "basis_string must be provided for 'basis' state initialization." + assert ( + basis_string is not None + ), "basis_string must be provided for 'basis' state initialization." self.init_mps_from_basis(basis_string, self.physical_dimensions) break else: @@ -191,7 +193,9 @@ def __init__( if pad is not None: self.pad_bond_dimension(pad) - def init_mps_from_basis(self, basis_string: str, physical_dimensions: list[int]) -> None: + def init_mps_from_basis( + self, basis_string: str, physical_dimensions: list[int] + ) -> None: """Initialize a list of MPS tensors representing a product state from a basis string. 
Args: @@ -350,8 +354,12 @@ def get_schmidt_spectrum(self, sites: list[int]) -> NDArray[np.float64]: NDArray[np.float64]: The Schmidt spectrum (length 500), with unused entries filled with NaN. """ - assert len(sites) == 2, "Schmidt spectrum is defined on a bond (two adjacent sites)." - assert sites[0] + 1 == sites[1], "Schmidt spectrum only defined for nearest-neighbor cut." + assert ( + len(sites) == 2 + ), "Schmidt spectrum is defined on a bond (two adjacent sites)." + assert ( + sites[0] + 1 == sites[1] + ), "Schmidt spectrum only defined for nearest-neighbor cut." top_schmidt_vals = 500 i, j = sites a, b = self.tensors[i], self.tensors[j] @@ -407,7 +415,9 @@ def almost_equal(self, other: MPS) -> bool: return False return True - def shift_orthogonality_center_right(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: + def shift_orthogonality_center_right( + self, current_orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Shifts orthogonality center right. This function performs a QR decomposition to shift the known current center to the right and move @@ -441,7 +451,9 @@ def shift_orthogonality_center_right(self, current_orthogonality_center: int, de self.tensors[current_orthogonality_center + 1], ) = (a_new, b_new) - def shift_orthogonality_center_left(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: + def shift_orthogonality_center_left( + self, current_orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Shifts orthogonality center left. This function flips the network, performs a right shift, then flips the network again. @@ -452,10 +464,14 @@ def shift_orthogonality_center_left(self, current_orthogonality_center: int, dec Default is QR. """ self.flip_network() - self.shift_orthogonality_center_right(self.length - current_orthogonality_center - 1, decomposition) + self.shift_orthogonality_center_right( + self.length - current_orthogonality_center - 1, decomposition + ) self.flip_network() - def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR") -> None: + def set_canonical_form( + self, orthogonality_center: int, decomposition: str = "QR" + ) -> None: """Sets canonical form of MPS. Left and right normalizes an MPS around a selected site. @@ -466,7 +482,9 @@ def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR decomposition: Type of decomposition. Default QR. 
""" - def sweep_decomposition(orthogonality_center: int, decomposition: str = "QR") -> None: + def sweep_decomposition( + orthogonality_center: int, decomposition: str = "QR" + ) -> None: for site, _ in enumerate(self.tensors): if site == orthogonality_center: break @@ -497,13 +515,17 @@ def normalize(self, form: str = "B", decomposition: str = "QR") -> None: if form == "B": self.flip_network() - self.set_canonical_form(orthogonality_center=self.length - 1, decomposition=decomposition) + self.set_canonical_form( + orthogonality_center=self.length - 1, decomposition=decomposition + ) self.shift_orthogonality_center_right(self.length - 1, decomposition) if form == "B": self.flip_network() - def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> None: + def truncate( + self, threshold: float = 1e-12, max_bond_dim: int | None = None + ) -> None: """In-place MPS truncation via repeated two-site SVDs.""" orth_center = self.check_canonical_form()[0] if self.length == 1: @@ -525,7 +547,9 @@ def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> self.flip_network() - def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np.complex128: + def scalar_product( + self, other: MPS, sites: int | list[int] | None = None + ) -> np.complex128: """Compute the scalar (inner) product between two Matrix Product States (MPS). The function contracts the corresponding tensors of two MPS objects. If no specific site is @@ -552,8 +576,12 @@ def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np result = None for idx in range(self.length): # contract at each site into a 4-leg tensor - theta = oe.contract("abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx]) - result = theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) + theta = oe.contract( + "abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx] + ) + result = ( + theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) + ) # squeeze down to scalar assert result is not None return np.complex128(np.squeeze(result)) @@ -585,7 +613,9 @@ def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np msg = f"Invalid `sites` argument: {sites!r}" raise ValueError(msg) - def local_expect(self, operator: Observable, sites: int | list[int]) -> np.complex128: + def local_expect( + self, operator: Observable, sites: int | list[int] + ) -> np.complex128: """Compute the local expectation value of an operator on an MPS. 
The function applies the given operator to the tensor at the specified site of a deep copy of the @@ -611,11 +641,17 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl i = sites if isinstance(operator.sites, list): - assert operator.sites[0] == i, f"Operator sites mismatch {operator.sites[0]}, {i}" + assert ( + operator.sites[0] == i + ), f"Operator sites mismatch {operator.sites[0]}, {i}" elif isinstance(operator.sites, int): - assert operator.sites == i, f"Operator sites mismatch {operator.sites}, {i}" + assert ( + operator.sites == i + ), f"Operator sites mismatch {operator.sites}, {i}" - assert i is not None, f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" + assert ( + i is not None + ), f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" a = temp_state.tensors[i] temp_state.tensors[i] = oe.contract("ab, bcd->acd", operator.gate.matrix, a) @@ -626,10 +662,12 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl assert operator.sites[0] == i, "Observable sites mismatch" assert operator.sites[1] == j, "Observable sites mismatch" - assert operator.sites[0] < operator.sites[1], "Observable sites must be in ascending order." - assert operator.sites[1] - operator.sites[0] == 1, ( - "Only nearest-neighbor observables are currently implemented." - ) + assert ( + operator.sites[0] < operator.sites[1] + ), "Observable sites must be in ascending order." + assert ( + operator.sites[1] - operator.sites[0] == 1 + ), "Only nearest-neighbor observables are currently implemented." a = temp_state.tensors[i] b = temp_state.tensors[j] d_i, left, _ = a.shape @@ -641,7 +679,9 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl theta = theta.reshape(left, d_i * d_j, right) # (l, d_i*d_j, r) # 2) apply operator on the combined phys index - theta = oe.contract("ab, cbd->cad", operator.gate.matrix, theta) # (l, d_i*d_j, r) + theta = oe.contract( + "ab, cbd->cad", operator.gate.matrix, theta + ) # (l, d_i*d_j, r) theta = theta.reshape(left, d_i, d_j, right) # back to (l, d_i, d_j, r) # 3) split via SVD @@ -654,7 +694,9 @@ def local_expect(self, operator: Observable, sites: int | list[int]) -> np.compl u_tensor = u_mat.reshape(left, d_i, chi_new) # (l, d_i, r_new) a_new = u_tensor.transpose(1, 0, 2) # → (d_i, l, r_new) - v_tensor = (np.diag(s_vec) @ v_mat).reshape(chi_new, d_j, right) # (l_new, d_j, r) + v_tensor = (np.diag(s_vec) @ v_mat).reshape( + chi_new, d_j, right + ) # (l_new, d_j, r) b_new = v_tensor.transpose(1, 0, 2) # → (d_j, l_new, r) temp_state.tensors[i] = a_new @@ -690,24 +732,44 @@ def evaluate_observables( elif observable.gate.name == "total_bond": results[obs_index, column_index] = self.get_total_bond() elif observable.gate.name in {"entropy", "schmidt_spectrum"}: - assert isinstance(observable.sites, list), "Given metric requires a list of sites" - assert len(observable.sites) == 2, "Given metric requires 2 sites to act on." + assert isinstance( + observable.sites, list + ), "Given metric requires a list of sites" + assert ( + len(observable.sites) == 2 + ), "Given metric requires 2 sites to act on." max_site = max(observable.sites) min_site = min(observable.sites) - assert max_site - min_site == 1, "Entropy and Schmidt cuts must be nearest neighbor." + assert ( + max_site - min_site == 1 + ), "Entropy and Schmidt cuts must be nearest neighbor." 
for s in observable.sites: - assert s in range(self.length), f"Observable acting on non-existing site: {s}" + assert s in range( + self.length + ), f"Observable acting on non-existing site: {s}" if observable.gate.name == "entropy": - results[obs_index, column_index] = self.get_entropy(observable.sites) + results[obs_index, column_index] = self.get_entropy( + observable.sites + ) elif observable.gate.name == "schmidt_spectrum": - results[obs_index, column_index] = self.get_schmidt_spectrum(observable.sites) + results[obs_index, column_index] = self.get_schmidt_spectrum( + observable.sites + ) elif observable.gate.name == "pvm": - assert hasattr(observable.gate, "bitstring"), "Gate does not have attribute bitstring." - results[obs_index, column_index] = self.project_onto_bitstring(observable.gate.bitstring) + assert hasattr( + observable.gate, "bitstring" + ), "Gate does not have attribute bitstring." + results[obs_index, column_index] = self.project_onto_bitstring( + observable.gate.bitstring + ) else: - idx = observable.sites[0] if isinstance(observable.sites, list) else observable.sites + idx = ( + observable.sites[0] + if isinstance(observable.sites, list) + else observable.sites + ) if idx > last_site: for site in range(last_site, idx): temp_state.shift_orthogonality_center_right(site) @@ -732,16 +794,24 @@ def expect(self, observable: Observable) -> np.float64: elif isinstance(observable.sites, list): sites_list = observable.sites - assert sites_list is not None, f"Invalid type in expect {type(observable.sites).__name__}" + assert ( + sites_list is not None + ), f"Invalid type in expect {type(observable.sites).__name__}" - assert len(sites_list) < 3, "Only one- and two-site observables are currently implemented." + assert ( + len(sites_list) < 3 + ), "Only one- and two-site observables are currently implemented." for s in sites_list: - assert s in range(self.length), f"Observable acting on non-existing site: {s}" + assert s in range( + self.length + ), f"Observable acting on non-existing site: {s}" exp = self.local_expect(observable, sites_list) - assert exp.imag < 1e-13, f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." + assert ( + exp.imag < 1e-13 + ), f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." 
return exp.real def measure_single_shot(self) -> int: @@ -758,7 +828,9 @@ def measure_single_shot(self) -> int: temp_state = copy.deepcopy(self) bitstring = [] for site, tensor in enumerate(temp_state.tensors): - reduced_density_matrix = oe.contract("abc, dbc->ad", tensor, np.conj(tensor)) + reduced_density_matrix = oe.contract( + "abc, dbc->ad", tensor, np.conj(tensor) + ) probabilities = np.diag(reduced_density_matrix).real rng = np.random.default_rng() chosen_index = rng.choice(len(probabilities), p=probabilities) @@ -772,7 +844,9 @@ def measure_single_shot(self) -> int: temp_state.tensors[site + 1] = ( # noqa: B909 1 / np.sqrt(probabilities[chosen_index]) - * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) + * oe.contract( + "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] + ) ) return sum(c << i for i, c in enumerate(bitstring)) @@ -797,10 +871,14 @@ def measure_shots(self, shots: int) -> dict[int, int]: if shots > 1: max_workers = max(1, multiprocessing.cpu_count() - 1) with ( - concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor, + concurrent.futures.ProcessPoolExecutor( + max_workers=max_workers + ) as executor, tqdm(total=shots, desc="Measuring shots", ncols=80) as pbar, ): - futures = [executor.submit(self.measure_single_shot) for _ in range(shots)] + futures = [ + executor.submit(self.measure_single_shot) for _ in range(shots) + ] for future in concurrent.futures.as_completed(futures): result = future.result() results[result] = results.get(result, 0) + 1 @@ -824,7 +902,9 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: Returns: float: Probability of obtaining the given bitstring under projective measurement. """ - assert len(bitstring) == self.length, "Bitstring length must match number of sites" + assert ( + len(bitstring) == self.length + ), "Bitstring length must match number of sites" temp_state = copy.deepcopy(self) total_norm = 1.0 @@ -832,7 +912,9 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: state_index = int(char) tensor = temp_state.tensors[site] local_dim = self.physical_dimensions[site] - assert 0 <= state_index < local_dim, f"Invalid state index {state_index} at site {site}" + assert ( + 0 <= state_index < local_dim + ), f"Invalid state index {state_index} at site {site}" selected_state = np.zeros(local_dim) selected_state[state_index] = 1 @@ -849,7 +931,11 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: # Normalize and propagate if site != self.length - 1: temp_state.tensors[site + 1] = ( - 1 / norm * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) + 1 + / norm + * oe.contract( + "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] + ) ) return np.complex128(total_norm**2) @@ -1366,13 +1452,20 @@ def bose_hubbard( tensor[3, 3] = id_boson # build the full tensor list - tensors = [np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) for _ in range(length)] + tensors = [ + np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) + for _ in range(length) + ] # Left boundary: take only row 0 - tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype(np.complex128) + tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype( + np.complex128 + ) # Right boundary: take only col 3 - tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype(np.complex128) + tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype( 
+ np.complex128 + ) mpo = cls() mpo.tensors = tensors @@ -1430,7 +1523,9 @@ def finite_state_machine( self.length = len(self.tensors) self.physical_dimension = self.tensors[0].shape[0] - def custom(self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True) -> None: + def custom( + self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True + ) -> None: """Custom MPO from tensors. Initialize the custom MPO (Matrix Product Operator) with the given tensors. @@ -1498,7 +1593,9 @@ def from_pauli_sum( self.physical_dimension = physical_dimension if not terms: - self.tensors = [np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length)] + self.tensors = [ + np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length) + ] return # 1. Parse terms into dense lists of operator names. @@ -1532,7 +1629,9 @@ def from_pauli_sum( term_trajectories[t_idx][length] = 0 # bond_state_maps[i] stores the mapping: (Op_str, Next_State_ID) -> Current_State_ID - bond_state_maps: list[dict[tuple[str, int], int]] = [{} for _ in range(length + 1)] + bond_state_maps: list[dict[tuple[str, int], int]] = [ + {} for _ in range(length + 1) + ] # Sweep Right-to-Left (sites L-1 down to 1) to build the FSM transitions. # We stop at bond 1. Bond 0 is always the single "Start" state. @@ -1598,7 +1697,9 @@ def from_pauli_sum( # The FSM construction is optimal for one-sided (suffix) uniqueness. # A standard two-sweep compression ("lr_rl") puts the MPO in canonical form # and removes any remaining redundancies (e.g., common prefixes). - self.compress(tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl") + self.compress( + tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl" + ) assert self.check_if_valid_mpo(), "MPO initialized wrong" def compress( @@ -1647,9 +1748,13 @@ def compress( for _ in range(n_sweeps): for direction in schedule: - self._compress_one_sweep(direction=direction, tol=tol, max_bond_dim=max_bond_dim) + self._compress_one_sweep( + direction=direction, tol=tol, max_bond_dim=max_bond_dim + ) - def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | None) -> None: + def _compress_one_sweep( + self, *, direction: str, tol: float, max_bond_dim: int | None + ) -> None: """Run one in-place MPO SVD compression sweep in the given direction. Args: @@ -1699,7 +1804,9 @@ def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | vh = vh[:keep, :] # Left tensor: (bond_dim_left, d, d, keep) -> (d, d, bond_dim_left, keep) - left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose(1, 2, 0, 3) + left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose( + 1, 2, 0, 3 + ) # Right tensor: (keep, d, d, bond_dim_right) -> (d, d, keep, bond_dim_right) svh = (s[:, None] * vh).reshape(keep, phys_dim, phys_dim, bond_dim_right) @@ -1781,7 +1888,7 @@ def from_matrix( mat: np.ndarray, d: int, max_bond: int | None = None, - cutoff: float = 0.0, + cutoff: float = 1e-12, ) -> MPO: """Factorize a dense matrix into an MPO with uniform local dimension ``d``. @@ -1798,8 +1905,8 @@ def from_matrix( max_bond (int | None): Maximum allowed bond dimension (before truncation). cutoff (float): - Singular values ``<= cutoff`` are discarded. By default cutoff=0: all non-zero singular - values are included. + Singular values ``<= cutoff`` are discarded. By default cutoff=1e-12: all numerically non-zero + singular values are included. 
Returns: MPO: From a51e9fdaef2514118fe4f950384dac87799e8659 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 17 Feb 2026 10:10:07 +0000 Subject: [PATCH 25/25] =?UTF-8?q?=F0=9F=8E=A8=20pre-commit=20fixes?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/mqt/yaqs/core/data_structures/networks.py | 215 +++++------------- 1 file changed, 54 insertions(+), 161 deletions(-) diff --git a/src/mqt/yaqs/core/data_structures/networks.py b/src/mqt/yaqs/core/data_structures/networks.py index 0938616a..0e06399d 100644 --- a/src/mqt/yaqs/core/data_structures/networks.py +++ b/src/mqt/yaqs/core/data_structures/networks.py @@ -174,9 +174,7 @@ def __init__( vector[0] = rng.random() vector[1] = 1 - vector[0] elif state == "basis": - assert ( - basis_string is not None - ), "basis_string must be provided for 'basis' state initialization." + assert basis_string is not None, "basis_string must be provided for 'basis' state initialization." self.init_mps_from_basis(basis_string, self.physical_dimensions) break else: @@ -193,9 +191,7 @@ def __init__( if pad is not None: self.pad_bond_dimension(pad) - def init_mps_from_basis( - self, basis_string: str, physical_dimensions: list[int] - ) -> None: + def init_mps_from_basis(self, basis_string: str, physical_dimensions: list[int]) -> None: """Initialize a list of MPS tensors representing a product state from a basis string. Args: @@ -354,12 +350,8 @@ def get_schmidt_spectrum(self, sites: list[int]) -> NDArray[np.float64]: NDArray[np.float64]: The Schmidt spectrum (length 500), with unused entries filled with NaN. """ - assert ( - len(sites) == 2 - ), "Schmidt spectrum is defined on a bond (two adjacent sites)." - assert ( - sites[0] + 1 == sites[1] - ), "Schmidt spectrum only defined for nearest-neighbor cut." + assert len(sites) == 2, "Schmidt spectrum is defined on a bond (two adjacent sites)." + assert sites[0] + 1 == sites[1], "Schmidt spectrum only defined for nearest-neighbor cut." top_schmidt_vals = 500 i, j = sites a, b = self.tensors[i], self.tensors[j] @@ -415,9 +407,7 @@ def almost_equal(self, other: MPS) -> bool: return False return True - def shift_orthogonality_center_right( - self, current_orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def shift_orthogonality_center_right(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: """Shifts orthogonality center right. This function performs a QR decomposition to shift the known current center to the right and move @@ -451,9 +441,7 @@ def shift_orthogonality_center_right( self.tensors[current_orthogonality_center + 1], ) = (a_new, b_new) - def shift_orthogonality_center_left( - self, current_orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def shift_orthogonality_center_left(self, current_orthogonality_center: int, decomposition: str = "QR") -> None: """Shifts orthogonality center left. This function flips the network, performs a right shift, then flips the network again. @@ -464,14 +452,10 @@ def shift_orthogonality_center_left( Default is QR. 
""" self.flip_network() - self.shift_orthogonality_center_right( - self.length - current_orthogonality_center - 1, decomposition - ) + self.shift_orthogonality_center_right(self.length - current_orthogonality_center - 1, decomposition) self.flip_network() - def set_canonical_form( - self, orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def set_canonical_form(self, orthogonality_center: int, decomposition: str = "QR") -> None: """Sets canonical form of MPS. Left and right normalizes an MPS around a selected site. @@ -482,9 +466,7 @@ def set_canonical_form( decomposition: Type of decomposition. Default QR. """ - def sweep_decomposition( - orthogonality_center: int, decomposition: str = "QR" - ) -> None: + def sweep_decomposition(orthogonality_center: int, decomposition: str = "QR") -> None: for site, _ in enumerate(self.tensors): if site == orthogonality_center: break @@ -515,17 +497,13 @@ def normalize(self, form: str = "B", decomposition: str = "QR") -> None: if form == "B": self.flip_network() - self.set_canonical_form( - orthogonality_center=self.length - 1, decomposition=decomposition - ) + self.set_canonical_form(orthogonality_center=self.length - 1, decomposition=decomposition) self.shift_orthogonality_center_right(self.length - 1, decomposition) if form == "B": self.flip_network() - def truncate( - self, threshold: float = 1e-12, max_bond_dim: int | None = None - ) -> None: + def truncate(self, threshold: float = 1e-12, max_bond_dim: int | None = None) -> None: """In-place MPS truncation via repeated two-site SVDs.""" orth_center = self.check_canonical_form()[0] if self.length == 1: @@ -547,9 +525,7 @@ def truncate( self.flip_network() - def scalar_product( - self, other: MPS, sites: int | list[int] | None = None - ) -> np.complex128: + def scalar_product(self, other: MPS, sites: int | list[int] | None = None) -> np.complex128: """Compute the scalar (inner) product between two Matrix Product States (MPS). The function contracts the corresponding tensors of two MPS objects. If no specific site is @@ -576,12 +552,8 @@ def scalar_product( result = None for idx in range(self.length): # contract at each site into a 4-leg tensor - theta = oe.contract( - "abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx] - ) - result = ( - theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) - ) + theta = oe.contract("abc,ade->bdce", a_copy.tensors[idx], b_copy.tensors[idx]) + result = theta if idx == 0 else oe.contract("abcd,cdef->abef", result, theta) # squeeze down to scalar assert result is not None return np.complex128(np.squeeze(result)) @@ -613,9 +585,7 @@ def scalar_product( msg = f"Invalid `sites` argument: {sites!r}" raise ValueError(msg) - def local_expect( - self, operator: Observable, sites: int | list[int] - ) -> np.complex128: + def local_expect(self, operator: Observable, sites: int | list[int]) -> np.complex128: """Compute the local expectation value of an operator on an MPS. 
The function applies the given operator to the tensor at the specified site of a deep copy of the @@ -641,17 +611,11 @@ def local_expect( i = sites if isinstance(operator.sites, list): - assert ( - operator.sites[0] == i - ), f"Operator sites mismatch {operator.sites[0]}, {i}" + assert operator.sites[0] == i, f"Operator sites mismatch {operator.sites[0]}, {i}" elif isinstance(operator.sites, int): - assert ( - operator.sites == i - ), f"Operator sites mismatch {operator.sites}, {i}" + assert operator.sites == i, f"Operator sites mismatch {operator.sites}, {i}" - assert ( - i is not None - ), f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" + assert i is not None, f"Invalid type for 'sites': expected int or list[int], got {type(sites).__name__}" a = temp_state.tensors[i] temp_state.tensors[i] = oe.contract("ab, bcd->acd", operator.gate.matrix, a) @@ -662,12 +626,10 @@ def local_expect( assert operator.sites[0] == i, "Observable sites mismatch" assert operator.sites[1] == j, "Observable sites mismatch" - assert ( - operator.sites[0] < operator.sites[1] - ), "Observable sites must be in ascending order." - assert ( - operator.sites[1] - operator.sites[0] == 1 - ), "Only nearest-neighbor observables are currently implemented." + assert operator.sites[0] < operator.sites[1], "Observable sites must be in ascending order." + assert operator.sites[1] - operator.sites[0] == 1, ( + "Only nearest-neighbor observables are currently implemented." + ) a = temp_state.tensors[i] b = temp_state.tensors[j] d_i, left, _ = a.shape @@ -679,9 +641,7 @@ def local_expect( theta = theta.reshape(left, d_i * d_j, right) # (l, d_i*d_j, r) # 2) apply operator on the combined phys index - theta = oe.contract( - "ab, cbd->cad", operator.gate.matrix, theta - ) # (l, d_i*d_j, r) + theta = oe.contract("ab, cbd->cad", operator.gate.matrix, theta) # (l, d_i*d_j, r) theta = theta.reshape(left, d_i, d_j, right) # back to (l, d_i, d_j, r) # 3) split via SVD @@ -694,9 +654,7 @@ def local_expect( u_tensor = u_mat.reshape(left, d_i, chi_new) # (l, d_i, r_new) a_new = u_tensor.transpose(1, 0, 2) # → (d_i, l, r_new) - v_tensor = (np.diag(s_vec) @ v_mat).reshape( - chi_new, d_j, right - ) # (l_new, d_j, r) + v_tensor = (np.diag(s_vec) @ v_mat).reshape(chi_new, d_j, right) # (l_new, d_j, r) b_new = v_tensor.transpose(1, 0, 2) # → (d_j, l_new, r) temp_state.tensors[i] = a_new @@ -732,44 +690,24 @@ def evaluate_observables( elif observable.gate.name == "total_bond": results[obs_index, column_index] = self.get_total_bond() elif observable.gate.name in {"entropy", "schmidt_spectrum"}: - assert isinstance( - observable.sites, list - ), "Given metric requires a list of sites" - assert ( - len(observable.sites) == 2 - ), "Given metric requires 2 sites to act on." + assert isinstance(observable.sites, list), "Given metric requires a list of sites" + assert len(observable.sites) == 2, "Given metric requires 2 sites to act on." max_site = max(observable.sites) min_site = min(observable.sites) - assert ( - max_site - min_site == 1 - ), "Entropy and Schmidt cuts must be nearest neighbor." + assert max_site - min_site == 1, "Entropy and Schmidt cuts must be nearest neighbor." 
for s in observable.sites: - assert s in range( - self.length - ), f"Observable acting on non-existing site: {s}" + assert s in range(self.length), f"Observable acting on non-existing site: {s}" if observable.gate.name == "entropy": - results[obs_index, column_index] = self.get_entropy( - observable.sites - ) + results[obs_index, column_index] = self.get_entropy(observable.sites) elif observable.gate.name == "schmidt_spectrum": - results[obs_index, column_index] = self.get_schmidt_spectrum( - observable.sites - ) + results[obs_index, column_index] = self.get_schmidt_spectrum(observable.sites) elif observable.gate.name == "pvm": - assert hasattr( - observable.gate, "bitstring" - ), "Gate does not have attribute bitstring." - results[obs_index, column_index] = self.project_onto_bitstring( - observable.gate.bitstring - ) + assert hasattr(observable.gate, "bitstring"), "Gate does not have attribute bitstring." + results[obs_index, column_index] = self.project_onto_bitstring(observable.gate.bitstring) else: - idx = ( - observable.sites[0] - if isinstance(observable.sites, list) - else observable.sites - ) + idx = observable.sites[0] if isinstance(observable.sites, list) else observable.sites if idx > last_site: for site in range(last_site, idx): temp_state.shift_orthogonality_center_right(site) @@ -794,24 +732,16 @@ def expect(self, observable: Observable) -> np.float64: elif isinstance(observable.sites, list): sites_list = observable.sites - assert ( - sites_list is not None - ), f"Invalid type in expect {type(observable.sites).__name__}" + assert sites_list is not None, f"Invalid type in expect {type(observable.sites).__name__}" - assert ( - len(sites_list) < 3 - ), "Only one- and two-site observables are currently implemented." + assert len(sites_list) < 3, "Only one- and two-site observables are currently implemented." for s in sites_list: - assert s in range( - self.length - ), f"Observable acting on non-existing site: {s}" + assert s in range(self.length), f"Observable acting on non-existing site: {s}" exp = self.local_expect(observable, sites_list) - assert ( - exp.imag < 1e-13 - ), f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." + assert exp.imag < 1e-13, f"Measurement should be real, '{exp.real:16f}+{exp.imag:16f}i'." 
return exp.real def measure_single_shot(self) -> int: @@ -828,9 +758,7 @@ def measure_single_shot(self) -> int: temp_state = copy.deepcopy(self) bitstring = [] for site, tensor in enumerate(temp_state.tensors): - reduced_density_matrix = oe.contract( - "abc, dbc->ad", tensor, np.conj(tensor) - ) + reduced_density_matrix = oe.contract("abc, dbc->ad", tensor, np.conj(tensor)) probabilities = np.diag(reduced_density_matrix).real rng = np.random.default_rng() chosen_index = rng.choice(len(probabilities), p=probabilities) @@ -844,9 +772,7 @@ def measure_single_shot(self) -> int: temp_state.tensors[site + 1] = ( # noqa: B909 1 / np.sqrt(probabilities[chosen_index]) - * oe.contract( - "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] - ) + * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) ) return sum(c << i for i, c in enumerate(bitstring)) @@ -871,14 +797,10 @@ def measure_shots(self, shots: int) -> dict[int, int]: if shots > 1: max_workers = max(1, multiprocessing.cpu_count() - 1) with ( - concurrent.futures.ProcessPoolExecutor( - max_workers=max_workers - ) as executor, + concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor, tqdm(total=shots, desc="Measuring shots", ncols=80) as pbar, ): - futures = [ - executor.submit(self.measure_single_shot) for _ in range(shots) - ] + futures = [executor.submit(self.measure_single_shot) for _ in range(shots)] for future in concurrent.futures.as_completed(futures): result = future.result() results[result] = results.get(result, 0) + 1 @@ -902,9 +824,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: Returns: float: Probability of obtaining the given bitstring under projective measurement. """ - assert ( - len(bitstring) == self.length - ), "Bitstring length must match number of sites" + assert len(bitstring) == self.length, "Bitstring length must match number of sites" temp_state = copy.deepcopy(self) total_norm = 1.0 @@ -912,9 +832,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: state_index = int(char) tensor = temp_state.tensors[site] local_dim = self.physical_dimensions[site] - assert ( - 0 <= state_index < local_dim - ), f"Invalid state index {state_index} at site {site}" + assert 0 <= state_index < local_dim, f"Invalid state index {state_index} at site {site}" selected_state = np.zeros(local_dim) selected_state[state_index] = 1 @@ -931,11 +849,7 @@ def project_onto_bitstring(self, bitstring: str) -> np.complex128: # Normalize and propagate if site != self.length - 1: temp_state.tensors[site + 1] = ( - 1 - / norm - * oe.contract( - "ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1] - ) + 1 / norm * oe.contract("ab, cbd->cad", projected_tensor, temp_state.tensors[site + 1]) ) return np.complex128(total_norm**2) @@ -1452,20 +1366,13 @@ def bose_hubbard( tensor[3, 3] = id_boson # build the full tensor list - tensors = [ - np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) - for _ in range(length) - ] + tensors = [np.transpose(tensor.copy(), (2, 3, 0, 1)).astype(np.complex128) for _ in range(length)] # Left boundary: take only row 0 - tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype( - np.complex128 - ) + tensors[0] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, 0:1, :].astype(np.complex128) # Right boundary: take only col 3 - tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 3:4].astype( - np.complex128 - ) + tensors[-1] = np.transpose(tensor.copy(), (2, 3, 0, 1))[:, :, :, 
3:4].astype(np.complex128) mpo = cls() mpo.tensors = tensors @@ -1523,9 +1430,7 @@ def finite_state_machine( self.length = len(self.tensors) self.physical_dimension = self.tensors[0].shape[0] - def custom( - self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True - ) -> None: + def custom(self, tensors: list[NDArray[np.complex128]], *, transpose: bool = True) -> None: """Custom MPO from tensors. Initialize the custom MPO (Matrix Product Operator) with the given tensors. @@ -1593,9 +1498,7 @@ def from_pauli_sum( self.physical_dimension = physical_dimension if not terms: - self.tensors = [ - np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length) - ] + self.tensors = [np.zeros((2, 2, 1, 1), dtype=complex) for _ in range(length)] return # 1. Parse terms into dense lists of operator names. @@ -1629,9 +1532,7 @@ def from_pauli_sum( term_trajectories[t_idx][length] = 0 # bond_state_maps[i] stores the mapping: (Op_str, Next_State_ID) -> Current_State_ID - bond_state_maps: list[dict[tuple[str, int], int]] = [ - {} for _ in range(length + 1) - ] + bond_state_maps: list[dict[tuple[str, int], int]] = [{} for _ in range(length + 1)] # Sweep Right-to-Left (sites L-1 down to 1) to build the FSM transitions. # We stop at bond 1. Bond 0 is always the single "Start" state. @@ -1697,9 +1598,7 @@ def from_pauli_sum( # The FSM construction is optimal for one-sided (suffix) uniqueness. # A standard two-sweep compression ("lr_rl") puts the MPO in canonical form # and removes any remaining redundancies (e.g., common prefixes). - self.compress( - tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl" - ) + self.compress(tol=tol, max_bond_dim=max_bond_dim, n_sweeps=n_sweeps, directions="lr_rl") assert self.check_if_valid_mpo(), "MPO initialized wrong" def compress( @@ -1748,13 +1647,9 @@ def compress( for _ in range(n_sweeps): for direction in schedule: - self._compress_one_sweep( - direction=direction, tol=tol, max_bond_dim=max_bond_dim - ) + self._compress_one_sweep(direction=direction, tol=tol, max_bond_dim=max_bond_dim) - def _compress_one_sweep( - self, *, direction: str, tol: float, max_bond_dim: int | None - ) -> None: + def _compress_one_sweep(self, *, direction: str, tol: float, max_bond_dim: int | None) -> None: """Run one in-place MPO SVD compression sweep in the given direction. Args: @@ -1804,9 +1699,7 @@ def _compress_one_sweep( vh = vh[:keep, :] # Left tensor: (bond_dim_left, d, d, keep) -> (d, d, bond_dim_left, keep) - left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose( - 1, 2, 0, 3 - ) + left = u.reshape(bond_dim_left, phys_dim, phys_dim, keep).transpose(1, 2, 0, 3) # Right tensor: (keep, d, d, bond_dim_right) -> (d, d, keep, bond_dim_right) svh = (s[:, None] * vh).reshape(keep, phys_dim, phys_dim, bond_dim_right)