
Commit 06ac704

committed Nov 1, 2022
make mps benchmark up to date
1 parent d788b8a commit 06ac704

File tree

3 files changed (+27, -11 lines)
 

benchmarks/scripts/utils.py (+11, -6)

@@ -6,6 +6,8 @@
 import numpy as np
 import tensorflow as tf
 from pathlib import Path
+import optax
+import tensorcircuit as tc
 
 
 qml_data = {}
@@ -218,12 +220,11 @@ def save(data, _uuid, path):
 
 def timing(f, nitrs, timeLimit):
     t0 = time.time()
-    print(f())
+    a = f()
     t1 = time.time()
     Nitrs = 1e-8
     for i in range(nitrs):
         a = f()
-        print(a)
         # if a != None:
         #     print(a)
         if time.time() - t1 > timeLimit:
@@ -263,15 +264,19 @@ def qml_timing(f, nbatch, nitrs, timeLimit, tfq=False):
 
 
 class Opt:
-    def __init__(self, f, params, lr=0.01, tuning=True):
+    def __init__(self, f, params, lr=0.002, tuning=True, backend="tensorflow"):
         self.f = f
         self.params = params
-        self.adam = tf.keras.optimizers.Adam(lr)
+        if backend == "tensorflow":
+            self.adam = tc.backend.optimizer(tf.keras.optimizers.Adam(lr))
+        else:
+            self.adam = tc.backend.optimizer(optax.adam(lr))
         self.tuning = tuning
 
     def step(self):
         e, grad = self.f(*self.params)
         if self.tuning:
-            grad = [tf.convert_to_tensor(g) for g in grad]
-            self.adam.apply_gradients(zip(grad, self.params))
+            self.params = self.adam.update(grad, self.params)
+            # grad = [tf.convert_to_tensor(g) for g in grad]
+            # self.adam.apply_gradients(zip(grad, self.params))
         return e[()]
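With this change the Opt helper is backend-agnostic: gradients are applied through tensorcircuit's optimizer wrapper instead of calling Keras directly, so the same class serves both the TensorFlow and the JAX/optax paths. A minimal sketch of the pattern, assuming the TensorFlow backend; only tc.backend.optimizer(...) and .update(grads, params) are taken from the diff above, while the toy quadratic loss and parameter values are made up for illustration:

    import numpy as np
    import tensorflow as tf
    import tensorcircuit as tc

    tc.set_backend("tensorflow")

    def loss(x):
        # toy stand-in for the benchmark's energy function
        return tc.backend.sum(x ** 2)

    vg = tc.backend.value_and_grad(loss)

    # wrap a framework optimizer behind the unified .update(grads, params) interface;
    # on the JAX path the same line would wrap optax.adam(0.002) instead
    opt = tc.backend.optimizer(tf.keras.optimizers.Adam(0.002))

    x = tc.backend.convert_to_tensor(np.ones([4], dtype="float32"))
    for _ in range(100):
        v, g = vg(x)
        x = opt.update(g, x)  # returns the updated parameters

Opt.step follows the same update call, with self.f expected to return (energy, grads) directly.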

benchmarks/scripts/vqe_tc.py (+6, -5)

@@ -109,7 +109,7 @@ def energy_raw(paramx, paramzz):
         c = tc.Circuit(n)
     else:
         c = tc.MPSCircuit(n)
-        c.set_truncation_rule(max_singular_values=mpsd)
+        c.set_split_rules({"max_singular_values": mpsd})
     paramx = tc.backend.cast(paramx, dtype)
     paramzz = tc.backend.cast(paramzz, dtype)
 
@@ -132,8 +132,8 @@ def energy_raw(paramx, paramzz):
     e = tfi_energy(c, n)
     fd = c._fidelity
     # tensorflow only works for complex case, while jax only works for real case, don't know how to solve it
-    if tcbackend != "tensorflow":
-        e = tc.backend.real(e)
+    # if tcbackend != "tensorflow":
+    e = tc.backend.real(e)
 
     return e, fd
 
@@ -144,12 +144,13 @@ def energy(paramx, paramzz):
     # paramx = tc.backend.convert_to_tensor(paramx)
     # paramzz = tc.backend.convert_to_tensor(paramzz)
     (value, f), grads = energy_raw(paramx, paramzz)
-    print(tc.backend.numpy(f), tc.backend.numpy(value))
+    print("fidelity: ", f, value)
+    # print(tc.backend.numpy(f), tc.backend.numpy(value))
     # value = tc.backend.numpy(tc.backend.real(value))
     # grads = [tc.backend.numpy(tc.backend.real(g)) for g in grads]
     return value, grads
 
-opt = utils.Opt(energy, [paramx, paramzz], tuning=False)
+opt = utils.Opt(energy, [paramx, paramzz], tuning=True, backend=tcbackend)
 ct, it, Nitrs = utils.timing(opt.step, nitrs, timeLimit)
 meta["Results"]["with jit"] = {
     "Construction time": ct,

docs/source/advance.rst (+10)

@@ -7,6 +7,16 @@ MPS Simulator
 
 (Still experimental support)
 
+We provide the same set of APIs for ``MPSCircuit`` as for ``Circuit``;
+the only new line required is to set the bond dimension for the new simulator.
+
+.. code-block:: python
+
+    c = tc.MPSCircuit(n)
+    c.set_split_rules({"max_singular_values": 50})
+
+The larger the bond dimension we set, the better the approximation (and, of course, the higher the computational cost we pay).
+
 Split Two-qubit Gates
 -------------------------
 
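To make the bond-dimension trade-off in that paragraph concrete, here is a hedged sketch (not part of the commit) that runs the same kind of circuit at two bond dimensions and compares the truncation fidelity tracked by the simulator; the gate pattern, qubit count, and the values 8 and 64 are invented for the example:

    import tensorcircuit as tc

    tc.set_backend("tensorflow")

    def run(n, bond_dim, layers=4):
        c = tc.MPSCircuit(n)
        c.set_split_rules({"max_singular_values": bond_dim})
        for _ in range(layers):
            for i in range(n):
                c.rx(i, theta=0.5)
            for i in range(n - 1):
                c.cnot(i, i + 1)
        # _fidelity accumulates the effect of truncation, as read in vqe_tc.py above
        return c._fidelity

    # a larger bond dimension should keep the fidelity closer to 1, at higher cost
    print(run(12, 8), run(12, 64))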
