
Commit 5dc6395

merge benchmark repo
1 parent 2a8995b commit 5dc6395

16 files changed: +1787 −6 lines changed

benchmarks/README.md

+9
@@ -0,0 +1,9 @@
# benchmark4tc

`cd scripts`

`python benchmark.py -n [# of qubits] -nlayer [# of QC layers] -nitrs [# of max iterations] -t [time limit] -gpu [0 for no gpu, 1 for gpu enabled] -tcbackend [jax or tensorflow]`

A `.json` file will then be created in the `data` folder containing the benchmarking parameters and results.

Since tensorcircuit may be installed in a local directory, you may first need to set `export PYTHONPATH=/abs/path/for/tc` in the terminal.
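As an illustration, a concrete run with example values (the numbers below are illustrative, not taken from this commit) would be: `python benchmark.py -n 10 -nlayer 3 -nitrs 100 -t 60 -gpu 0 -tcbackend jax`.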

benchmarks/dataset/.gitkeep

Whitespace-only changes.

benchmarks/requirements.txt

+10
@@ -0,0 +1,10 @@
pennylane==0.18
tensorflow==2.4.1
tensorflow-quantum==0.5.1
tensornetwork==0.4.5
#jaxlib==0.1.71
jax==0.2.21
networkx
sympy
py-cpuinfo
scikit-learn

benchmarks/scripts/benchmark.py

+33
@@ -0,0 +1,33 @@
import uuid
import os
import utils


if __name__ == "__main__":
    _uuid = str(uuid.uuid4())
    nwires, nlayer, nitrs, timeLimit, isgpu, minus, path = utils.arg()
    if isgpu == 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    else:
        import tensorflow as tf

        gpu = tf.config.list_physical_devices("GPU")
        tf.config.experimental.set_memory_growth(device=gpu[0], enable=True)
    from vqe_pennylane import pennylane_benchmark
    from vqe_tc_tf import tensorcircuit_tf_benchmark
    from vqe_tc_jax import tensorcircuit_jax_benchmark
    from vqe_tfquantum import tfquantum_benchmark

    pl_json = pennylane_benchmark(_uuid, nwires, nlayer, nitrs, timeLimit, isgpu)
    tfq_json = tfquantum_benchmark(_uuid, nwires, nlayer, nitrs, timeLimit, isgpu)
    tc32_json = tensorcircuit_tf_benchmark(
        _uuid, nwires, nlayer, nitrs, timeLimit, isgpu, "32"
    )
    tc64_json = tensorcircuit_tf_benchmark(
        _uuid, nwires, nlayer, nitrs, timeLimit, isgpu, "64"
    )
    tcjax_json = tensorcircuit_jax_benchmark(
        _uuid, nwires, nlayer, nitrs, timeLimit, isgpu
    )
    utils.save([pl_json, tfq_json, tc32_json, tc64_json, tcjax_json], _uuid, path)
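The script unpacks seven values from utils.arg(), but utils.py is not part of this excerpt. The following argparse-based sketch is purely an assumption inferred from the README flags and the call sites in these scripts (including the check/qml variants used below); flag defaults and the -minus, -path, -check and -nbatch options are guesses, not the repository's actual code.

# Hypothetical sketch of utils.arg(); every default here is an assumption.
import argparse

def arg(check=False, qml=False):
    p = argparse.ArgumentParser(description="tensorcircuit benchmark driver (sketch)")
    p.add_argument("-n", type=int, default=10, help="number of qubits")
    p.add_argument("-nlayer", type=int, default=3, help="number of QC layers")
    p.add_argument("-nitrs", type=int, default=100, help="maximum number of iterations")
    p.add_argument("-t", type=int, default=60, help="time limit in seconds")
    p.add_argument("-gpu", type=int, default=0, help="0 for no gpu, 1 for gpu enabled")
    p.add_argument("-minus", type=int, default=1, help="assumed: qubit-count offset")
    p.add_argument("-path", type=str, default="../data/", help="assumed: output directory")
    p.add_argument("-check", type=int, default=0, help="assumed: 1 to also time looped variants")
    p.add_argument("-nbatch", type=int, default=32, help="assumed: QML batch size")
    # the README's -tcbackend flag is omitted in this sketch
    a = p.parse_args()
    out = [a.n, a.nlayer, a.nitrs, a.t, a.gpu, a.minus, a.path]
    if check:
        out.append(a.check)
    if qml:
        out.append(a.nbatch)
    return tuple(out)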

benchmarks/scripts/qml_benchmark.py

+96
@@ -0,0 +1,96 @@
import uuid
import os
import utils


if __name__ == "__main__":
    _uuid = str(uuid.uuid4())
    nwires, nlayer, nitrs, timeLimit, isgpu, minus, path, nbatch = utils.arg(qml=True)
    if isgpu == 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    else:
        import tensorflow as tf

        gpu = tf.config.list_physical_devices("GPU")
        tf.config.experimental.set_memory_growth(device=gpu[0], enable=True)

    train_img, test_img, train_lbl, test_lbl = utils.mnist_data_preprocessing(
        nwires - 1
    )

    from qml_pennylane import pennylane_benchmark
    from qml_tc_tf import tensorcircuit_tf_benchmark
    from qml_tc_jax import tensorcircuit_jax_benchmark
    from qml_tfquantum import tfquantum_benchmark

    pl_json = pennylane_benchmark(
        _uuid,
        nwires,
        nlayer,
        nitrs,
        timeLimit,
        isgpu,
        train_img,
        test_img,
        train_lbl,
        test_lbl,
        nbatch,
        False,
    )
    tfq_json = tfquantum_benchmark(
        _uuid,
        nwires,
        nlayer,
        nitrs,
        timeLimit,
        isgpu,
        train_img,
        test_img,
        train_lbl,
        test_lbl,
        nbatch,
    )
    tc32_json = tensorcircuit_tf_benchmark(
        _uuid,
        nwires,
        nlayer,
        nitrs,
        timeLimit,
        isgpu,
        train_img,
        test_img,
        train_lbl,
        test_lbl,
        nbatch,
        "32",
    )
    tc64_json = tensorcircuit_tf_benchmark(
        _uuid,
        nwires,
        nlayer,
        nitrs,
        timeLimit,
        isgpu,
        train_img,
        test_img,
        train_lbl,
        test_lbl,
        nbatch,
        "64",
    )
    tcjax_json = tensorcircuit_jax_benchmark(
        _uuid,
        nwires,
        nlayer,
        nitrs,
        timeLimit,
        isgpu,
        train_img,
        test_img,
        train_lbl,
        test_lbl,
        nbatch,
        "64",
    )
    utils.save([pl_json, tfq_json, tc32_json, tc64_json, tcjax_json], _uuid, path)
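utils.mnist_data_preprocessing(nwires - 1) is also outside this excerpt; the script only shows that it returns train/test images reduced to nwires - 1 features plus labels. Since scikit-learn appears in requirements.txt, one plausible, purely assumed reading is a PCA reduction of a binary MNIST subset, sketched below; the real helper may differ.

# Hypothetical sketch only; not the repository's actual preprocessing code.
import numpy as np
import tensorflow as tf
from sklearn.decomposition import PCA

def mnist_data_preprocessing_sketch(nfeatures, ntrain=1000, ntest=200):
    (x_tr, y_tr), (x_te, y_te) = tf.keras.datasets.mnist.load_data()

    def pick(x, y, n):
        # keep a binary 0-vs-1 task, flatten, and rescale pixels to [0, 1]
        m = (y == 0) | (y == 1)
        x, y = x[m][:n], y[m][:n]
        return x.reshape(len(x), -1) / 255.0, y.astype(np.float64)

    x_tr, y_tr = pick(x_tr, y_tr, ntrain)
    x_te, y_te = pick(x_te, y_te, ntest)
    pca = PCA(n_components=nfeatures).fit(x_tr)  # reduce to nwires - 1 features
    return pca.transform(x_tr), pca.transform(x_te), y_tr, y_te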

benchmarks/scripts/qml_pennylane.py

+244
@@ -0,0 +1,244 @@
import sys
import pennylane as qml
import tensorflow as tf
import numpy as np
import utils
import time
import cpuinfo
import datetime
import os
import jax
import uuid
import tensorcircuit as tc

tc.set_backend("tensorflow")


def pennylane_benchmark(
    uuid,
    nwires,
    nlayer,
    nitrs,
    timeLimit,
    isgpu,
    train_img,
    test_img,
    train_lbl,
    test_lbl,
    nbatch,
    check_loop,
):
    meta = {}

    if isgpu == 0:
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
        meta["isgpu"] = "off"

    else:
        gpu = tf.config.list_physical_devices("GPU")
        tf.config.experimental.set_memory_growth(device=gpu[0], enable=True)
        meta["isgpu"] = "on"
        meta["Gpuinfo"] = utils.gpuinfo()

    meta["Software"] = "pennylane"
    meta["Cpuinfo"] = cpuinfo.get_cpu_info()["brand_raw"]
    meta["Version"] = {
        "sys": sys.version,
        "tensorflow": tf.__version__,
        "pennylane": qml.__version__,
        "numpy": np.__version__,
    }
    meta["QML test parameters"] = {
        "nQubits": nwires,
        "nlayer": nlayer,
        "nitrs": nitrs,
        "timeLimit": timeLimit,
        "nbatch": nbatch,
    }
    meta["UUID"] = uuid
    meta["Benchmark Time"] = (
        datetime.datetime.now().astimezone().strftime("%Y-%m-%d %H:%M %Z")
    )
    meta["Results"] = {}

    dev = qml.device("default.qubit.jax", wires=nwires)

    @qml.qnode(dev, interface="jax")
    def jax_expval(img, params):
        for i in range(nwires - 1):
            qml.RX(img[i] * np.pi, wires=i)
        for j in range(nlayer):
            for i in range(nwires - 1):
                qml.IsingZZ(params[i + j * 2 * nwires], wires=[i, nwires - 1])
            for i in range(nwires):
                qml.RX(params[nwires + i + j * 2 * nwires], wires=i)
        return qml.expval(qml.Hamiltonian([1.0], [qml.PauliZ(nwires - 1)], True))

    def loss(img, lbl, params):
        return (lbl - jax_expval(img, params) * 0.5 - 0.5) ** 2

    vag = jax.value_and_grad(loss, argnums=(2,))
    vag = jax.jit(vag)
    params = jax.numpy.array(np.random.normal(size=[nlayer * 2 * nwires]))

    def f(train_imgs, train_lbls):
        e = []
        for i in range(len(train_imgs)):
            ee, grad = vag(train_imgs[i], train_lbls[i], params)
            e.append(ee)
        return np.mean(e)

    if check_loop:
        ct, it, Nitrs = utils.qml_timing(f, nbatch, nitrs, timeLimit)
        meta["Results"]["jax_loop"] = {
            "Construction time": ct,
            "Iteration time": it,
            "# of actual iterations": Nitrs,
        }
    _vloss = jax.vmap(loss, (0, 0, None), 0)

    def vloss(img, lbl, params):
        return jax.numpy.mean(_vloss(img, lbl, params))

    vag = jax.value_and_grad(vloss, argnums=(2,))
    params = jax.numpy.array(np.random.normal(size=[nlayer * 2 * nwires]))

    # def f(train_imgs, train_lbls):
    #     e, grad = vag(jax.numpy.array(train_imgs), jax.numpy.array(train_lbls), params)
    #     return e

    # ct, it, Nitrs = utils.qml_timing(f, nbatch, nitrs, timeLimit)
    # meta["Results"]["jax_vmap"] = {
    #     "Construction time": ct,
    #     "Iteration time": it,
    #     "# of actual iterations": Nitrs,
    # }
    vag = jax.jit(vag)

    def f(train_imgs, train_lbls):
        e, grad = vag(jax.numpy.array(train_imgs), jax.numpy.array(train_lbls), params)
        return e

    ct, it, Nitrs = utils.qml_timing(f, nbatch, nitrs, timeLimit)
    meta["Results"]["jax_vmap"] = {
        "Construction time": ct,
        "Iteration time": it,
        "# of actual iterations": Nitrs,
    }

    print(meta)  # in case OOM in tf case

    ## tf device below

    dev = qml.device("default.qubit.tf", wires=nwires)

    @qml.qnode(dev, interface="tf")
    def tf_value(img, params):
        for i in range(nwires - 1):
            qml.RX(img[i] * np.pi, wires=i)
        for j in range(nlayer):
            for i in range(nwires - 1):
                qml.IsingZZ(params[i + j * 2 * nwires], wires=[i, nwires - 1])
            for i in range(nwires):
                qml.RX(params[nwires + i + j * 2 * nwires], wires=i)
        return qml.expval(qml.Hamiltonian([1.0], [qml.PauliZ(nwires - 1)], True))

    @tf.function
    def tf_vag(img, lbl, params):
        with tf.GradientTape() as t:
            t.watch(params)
            loss = (tf_value(img, params) * 0.5 + 0.5 - lbl) ** 2
        return loss, t.gradient(loss, params)

    params = tf.Variable(np.random.normal(size=[nlayer * 2 * nwires]), dtype=tf.float64)
    a = tf.Variable(train_img[0])
    b = tf.Variable(train_lbl[0], dtype=tf.float64)
    opt = tf.keras.optimizers.Adam(0.01)

    def f(train_imgs, train_lbls):
        loss_ = []
        grad_ = []
        for i in range(len(train_imgs)):
            a.assign(train_imgs[i])
            b.assign(train_lbls[i])
            loss, grad = tf_vag(a, b, params)
            loss_.append(loss)
            grad_.append(grad)
        opt.apply_gradients(zip([tf.reduce_mean(grad_, axis=0)], [params]))
        return np.mean(loss_)

    if check_loop:
        ct, it, Nitrs = utils.qml_timing(f, nbatch, nitrs, timeLimit)
        meta["Results"]["tf_loop"] = {
            "Construction time": ct,
            "Iteration time": it,
            "# of actual iterations": Nitrs,
        }

    a = tf.Variable(train_img[:nbatch], dtype=tf.float64)
    b = tf.Variable(train_lbl[:nbatch], dtype=tf.float64)

    opt = tf.keras.optimizers.Adam(0.01)

    @tf.function
    @qml.qnode(dev, interface="tf")
    def tfvalue(img, lbl, params):
        for i in range(nwires - 1):
            qml.RX(img[i] * np.pi, wires=i)
        for j in range(nlayer):
            for i in range(nwires - 1):
                qml.IsingZZ(params[i + j * 2 * nwires], wires=[i, nwires - 1])
            for i in range(nwires):
                qml.RX(params[nwires + i + j * 2 * nwires], wires=i)
        return qml.expval(qml.Hamiltonian([1.0], [qml.PauliZ(nwires - 1)], True))

    def tf_loss(img, lbl, params):
        loss = (tf_value(img, params) * 0.5 + 0.5 - lbl) ** 2
        return loss

    tf_vvag = tf.function(
        tc.backend.vvag(tf_loss, vectorized_argnums=(0, 1), argnums=2)
    )

    def f(train_imgs, train_lbls):
        a.assign(train_imgs)
        b.assign(train_lbls)
        losses, grad = tf_vvag(a, b, params)
        opt.apply_gradients(zip([grad], [params]))
        return tf.reduce_mean(losses)

    ct, it, Nitrs = utils.qml_timing(f, nbatch, nitrs, timeLimit)
    meta["Results"]["tf_vmap"] = {
        "Construction time": ct,
        "Iteration time": it,
        "# of actual iterations": Nitrs,
    }
    print(meta)
    return meta


if __name__ == "__main__":
    _uuid = str(uuid.uuid4())
    n, nlayer, nitrs, timeLimit, isgpu, minus, path, check, nbatch = utils.arg(
        check=True, qml=True
    )
    train_img, test_img, train_lbl, test_lbl = utils.mnist_data_preprocessing(n - 1)
    if check == 1:
        checkbool = True
    else:
        checkbool = False
    results = pennylane_benchmark(
        _uuid,
        n,
        nlayer,
        nitrs,
        timeLimit,
        isgpu,
        train_img,
        test_img,
        train_lbl,
        test_lbl,
        nbatch,
        checkbool,
    )
    utils.save(results, _uuid, path)
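The function above times two strategies for the same loss: a per-sample Python loop ("jax_loop" / "tf_loop") and a batched version ("jax_vmap" / "tf_vmap") built with jax.vmap or tc.backend.vvag. The snippet below is a standalone toy illustration of that vectorization pattern, using a made-up linear loss purely for illustration; it is not part of the commit.

# Toy illustration of the loop-vs-vmap pattern benchmarked above; the model
# here is a made-up linear regressor, not the quantum circuit.
import jax
import jax.numpy as jnp

def loss(x, y, params):
    # per-sample squared error
    return (jnp.dot(x, params) - y) ** 2

# vectorize over the batch dimension of x and y, broadcast params, then jit
batched_vag = jax.jit(jax.vmap(jax.value_and_grad(loss, argnums=2), in_axes=(0, 0, None)))

xs = jnp.ones((8, 4))
ys = jnp.zeros(8)
params = jnp.ones(4)
values, grads = batched_vag(xs, ys, params)  # one traced call for the whole batch
print(values.shape, grads.shape)             # (8,) and (8, 4)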
