Commit 21382c2

fix some api in dmcircuit
1 parent caf6459 commit 21382c2

10 files changed: +90 -6 lines

CHANGELOG.md

+7 -1

@@ -2,9 +2,15 @@
 
 ## Unreleased
 
+### Added
+
+- Add `expectation_ps` method for `DMCircuit`
+
 ### Fixed
 
-- With `Circuit.vis_tex`, for the Circuit has customized input state, the default visualization is psi instead of all zeros now.
+- With `Circuit.vis_tex`, for the Circuit has customized input state, the default visualization is psi instead of all zeros now
+
+- `general_kraus` is synced with `apply_general_kraus` for `DMCircuit`
 
 ## 0.0.220402
 

docs/source/conf.py

+2
@@ -48,6 +48,8 @@
     "nbsphinx",
 ]
 
+autosectionlabel_prefix_document = True
+
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ["_templates"]
 

docs/source/faq.rst

+16
@@ -40,6 +40,22 @@ For PyTorch, we can in pricinple wrap the corresponding quantum function into a
 In terms of Jax backend, we highly suggested to keep the functional programming paradigm for such machine learning task.
 Besides, it is worthing noting that, jit and vmap is automatically taken care of in ``QuantumLayer``.
 
+When do I need to customize the contractor and how?
+------------------------------------------------------
+
+As a rule of thumb, for circuits with more than 16 qubits and a circuit depth larger than 8, a customized contraction strategy may outperform the default built-in greedy contraction strategy.
+
+Whether to set up a customized contractor is a trade-off between the time spent on contraction path finding and the time spent on the actual contraction via matmul.
+
+A customized contractor costs much more time than the default one for contraction path searching, but the path it finds usually makes the actual contraction take less time and space.
+
+If the circuit simulation time is the bottleneck of the whole workflow, one can always try a customized contractor to see whether there is a performance improvement.
+
+We recommend the `cotengra library <https://cotengra.readthedocs.io/en/latest/index.html>`_ for setting up the contractor, since it exposes many interesting hyperparameters to tune, allowing a better trade-off between the time spent on contraction path search and the time spent on the actual tensor network contraction.
+
+It is also worth noting that for jitted functions, which we usually use, the contraction path search is only invoked on the first run of the function, which further amortizes the search time and favors the use of a highly customized contractor.
+
+In terms of how-to on contractor setup, please refer to :ref:`quickstart:Setup the Contractor`.
 
 Is there some API less cumbersome than ``expectation`` for Pauli string?
 ----------------------------------------------------------------------------
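
As a concrete companion to the new FAQ entry above, here is a minimal sketch (not part of this commit) of a customized contractor setup with cotengra; the specific `ReusableHyperOptimizer` hyperparameters are illustrative only, and the `tc.set_contractor("custom", ...)` entry point is assumed to behave as described in the quickstart section that the entry links to.

import cotengra as ctg
import tensorcircuit as tc

# Illustrative hyperparameters; tune them for your own circuits.
opt = ctg.ReusableHyperOptimizer(
    parallel=True,      # search candidate paths in parallel
    minimize="write",   # optimize for memory writes rather than flops
    max_time=60,        # cap the path-search time (seconds)
    max_repeats=128,
    progbar=True,
)
tc.set_contractor("custom", optimizer=opt, preprocessing=True)

# Subsequent circuit simulations use paths found (and cached) by cotengra.
c = tc.Circuit(18)
for i in range(18):
    c.h(i)
print(c.expectation_ps(z=[0]))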

docs/source/tutorial.rst

+1
@@ -13,5 +13,6 @@ Jupyter Tutorials
     tutorials/gradient_benchmark.ipynb
     tutorials/contractors.ipynb
     tutorials/operator_spreading.ipynb
+    tutorials/optimization_and_expressibility.ipynb
     tutorials/vqex_mbl.ipynb
     tutorials/dqas.ipynb

docs/source/tutorial_cn.rst

+1
@@ -13,5 +13,6 @@
     tutorials/gradient_benchmark_cn.ipynb
     tutorials/contractors_cn.ipynb
     tutorials/operator_spreading_cn.ipynb
+    tutorials/optimization_and_expressibility_cn.ipynb
     tutorials/vqex_mbl_cn.ipynb
     tutorials/dqas_cn.ipynb

examples/vqe_extra_mpo.py

+1 -1

@@ -18,8 +18,8 @@
 import tensorflow as tf
 import tensornetwork as tn
 import cotengra as ctg
-
 import optax
+
 import tensorcircuit as tc
 
 opt = ctg.ReusableHyperOptimizer(

examples/vqe_extra_mpo_spopt.py

-2
@@ -19,11 +19,9 @@
 
 sys.setrecursionlimit(10000)
 
-import tensorflow as tf
 import tensornetwork as tn
 import cotengra as ctg
 
-import optax
 import tensorcircuit as tc
 
 optc = ctg.ReusableHyperOptimizer(

tensorcircuit/densitymatrix.py

+39 -1

@@ -13,7 +13,7 @@
 from . import gates
 from . import channels
 from .circuit import Circuit
-from .cons import backend, contractor, npdtype
+from .cons import backend, contractor, npdtype, dtypestr
 
 Gate = gates.Gate
 Tensor = Any
@@ -50,6 +50,8 @@ def __init__(
             lnodes.extend(nodes)
             self._nodes = lnodes
         elif inputs is not None:
+            inputs = backend.convert_to_tensor(inputs)
+            inputs = backend.cast(inputs, dtype=dtypestr)
            inputs = backend.reshape(inputs, [-1])
             N = inputs.shape[0]
             n = int(np.log(N) / np.log(2))
@@ -63,6 +65,8 @@ def __init__(
             lnodes.extend(nodes)
             self._nodes = lnodes
         else:  # dminputs is not None
+            dminputs = backend.convert_to_tensor(dminputs)
+            dminputs = backend.cast(dminputs, dtype=dtypestr)
             dminputs = backend.reshape(dminputs, [2 for _ in range(2 * nqubits)])
             dminputs = Gate(dminputs)
             nodes = [dminputs]
@@ -269,5 +273,39 @@ def expectation(self, *ops: Tuple[tn.Node, List[int]]) -> tn.Node.tensor:
     def check_density_matrix(dm: Tensor) -> None:
         assert np.allclose(backend.trace(dm), 1.0, atol=1e-5)
 
+    def expectation_ps(
+        self,
+        x: Optional[Sequence[int]] = None,
+        y: Optional[Sequence[int]] = None,
+        z: Optional[Sequence[int]] = None,
+        reuse: bool = True,
+    ) -> Tensor:
+        """
+        Shortcut for Pauli string expectation.
+        The x, y, z lists give the qubit positions for X, Y, Z operators.
+
+        :param x: qubit indices where Pauli X is applied, defaults to None
+        :type x: Optional[Sequence[int]], optional
+        :param y: qubit indices where Pauli Y is applied, defaults to None
+        :type y: Optional[Sequence[int]], optional
+        :param z: qubit indices where Pauli Z is applied, defaults to None
+        :type z: Optional[Sequence[int]], optional
+        :param reuse: whether to cache and reuse the wavefunction, defaults to True
+        :type reuse: bool, optional
+        :return: Expectation value
+        :rtype: Tensor
+        """
+        obs = []
+        if x is not None:
+            for i in x:
+                obs.append([gates.x(), [i]])  # type: ignore
+        if y is not None:
+            for i in y:
+                obs.append([gates.y(), [i]])  # type: ignore
+        if z is not None:
+            for i in z:
+                obs.append([gates.z(), [i]])  # type: ignore
+        return self.expectation(*obs)  # type: ignore
+
 
 DMCircuit._meta_apply()
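
For quick orientation, here is a minimal usage sketch (not part of this commit) of the `expectation_ps` method added above; it only relies on the `tc.DMCircuit` constructor and the `H` gate method that also appear in the new test at the end of this diff.

import tensorcircuit as tc

c = tc.DMCircuit(1)
c.H(0)
# On the |+><+| state, <X> is +1 while <Z> vanishes.
print(c.expectation_ps(x=[0]))  # ~ 1.0
print(c.expectation_ps(z=[0]))  # ~ 0.0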

tensorcircuit/densitymatrix2.py

+11 -1

@@ -5,8 +5,10 @@
 
 from typing import Any, Callable, Sequence
 
+import tensornetwork as tn
+
 from . import gates
-from .cons import backend
+from .cons import backend, dtypestr
 from .channels import kraus_to_super_gate
 from .densitymatrix import DMCircuit
 
@@ -26,6 +28,12 @@ def _copy_DMCircuit(self) -> "DMCircuit2":
 
     def apply_general_kraus(self, kraus: Sequence[Gate], *index: int) -> None:  # type: ignore
         # incompatible API for now
+        kraus = [
+            k
+            if isinstance(k, tn.Node)
+            else Gate(backend.cast(backend.convert_to_tensor(k), dtypestr))
+            for k in kraus
+        ]
         self.check_kraus(kraus)
         if not isinstance(
             index[0], int
@@ -48,6 +56,8 @@ def apply_general_kraus(self, kraus: Sequence[Gate], *index: int) -> None:  # ty
         self._nodes.append(super_op)
         setattr(self, "state_tensor", None)
 
+    general_kraus = apply_general_kraus  # type: ignore
+
     @staticmethod
     def apply_general_kraus_delayed(
         krausf: Callable[..., Sequence[Gate]]
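
To make the effect of the change above concrete, here is a minimal sketch (not part of this commit) of applying a bit-flip channel with plain numpy Kraus matrices, which are now accepted directly thanks to the conversion added in `apply_general_kraus`; the probability value is arbitrary, and `tc.DMCircuit` is used as in the new test below.

import numpy as np
import tensorcircuit as tc

p = 0.3  # illustrative bit-flip probability
K0 = np.sqrt(1 - p) * np.eye(2)                       # identity branch
K1 = np.sqrt(p) * np.array([[0.0, 1.0], [1.0, 0.0]])  # Pauli-X branch

c = tc.DMCircuit(1)
# Raw numpy arrays are converted to Gate nodes internally after this commit.
c.general_kraus([K0, K1], 0)
# Starting from |0><0|, <Z> after the bit-flip channel is (1 - p) - p = 0.4.
print(c.expectation_ps(z=[0]))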

tests/test_dmcircuit.py

+12
@@ -226,3 +226,15 @@ def test_ad_channel(tfb):
     g = tape.gradient(loss, p)
     np.testing.assert_allclose(loss.numpy(), 0.7562, atol=1e-4)
     np.testing.assert_allclose(g.numpy(), -2.388, atol=1e-4)
+
+
+@pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb"), lf("torchb")])
+def test_inputs_pipeline(backend):
+    dm = 0.25 * np.eye(4)
+    c = tc.DMCircuit(2, dminputs=dm)
+    c.H(0)
+    c.general_kraus(
+        [np.sqrt(0.5) * tc.gates._i_matrix, np.sqrt(0.5) * tc.gates._x_matrix], 1
+    )
+    r = c.expectation_ps(z=[0])
+    np.testing.assert_allclose(r, 0.0, atol=1e-5)
