
Commit bb91965

further rename to vg

Parent: 92335b2

File tree: 9 files changed (+425, -497 lines)


docs/source/tutorials/mnist_qml.ipynb (+198, -234)
Large diffs are not rendered by default.

docs/source/tutorials/mnist_qml_cn.ipynb (+198, -234)
Large diffs are not rendered by default.

examples/chaotic_behavior.py (+2, -2)

@@ -82,11 +82,11 @@ def get_zz(params, n, nlayers, inputs=None):
 
 # optimization, suppose the energy we want to minimize is just z1z2 as above
 
-vag_func = K.jit(K.value_and_grad(get_zz), static_argnums=(1, 2))
+vg_func = K.jit(K.value_and_grad(get_zz), static_argnums=(1, 2))
 opt = K.optimizer(tf.keras.optimizers.Adam(1e-2))
 
 for i in range(200): # gradient descent
-    energy, grads = vag_func(params, 10, 5)
+    energy, grads = vg_func(params, 10, 5)
     params = opt.update(grads, params)
     if i % 20 == 0:
         print(energy) # see energy optimization dynamics
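
A minimal, self-contained sketch of the renamed vg_func pattern (the toy energy function here is hypothetical, standing in for the example's get_zz; assumes the TensorFlow backend):

    import tensorflow as tf
    import tensorcircuit as tc

    K = tc.set_backend("tensorflow")

    def energy(params, n, nlayers):
        # toy stand-in for get_zz: measure <Z0 Z1> on a layered rx circuit
        c = tc.Circuit(n)
        for j in range(nlayers):
            for i in range(n):
                c.rx(i, theta=params[j, i])
        return K.real(c.expectation((tc.gates.z(), [0]), (tc.gates.z(), [1])))

    # jit once; n and nlayers are static since they change the circuit structure
    vg_func = K.jit(K.value_and_grad(energy), static_argnums=(1, 2))
    opt = K.optimizer(tf.keras.optimizers.Adam(1e-2))
    params = K.implicit_randn(shape=[2, 3], stddev=0.1)
    for _ in range(50):
        e, grads = vg_func(params, 3, 2)
        params = opt.update(grads, params)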

examples/checkpoint_memsave.py (+3, -3)

@@ -106,13 +106,13 @@ def vqe_forward(param):
 
 def profile(tries=3):
     time0 = time.time()
-    tc_vag = tc.backend.jit(tc.backend.value_and_grad(vqe_forward))
+    tc_vg = tc.backend.jit(tc.backend.value_and_grad(vqe_forward))
     param = tc.backend.cast(tc.backend.ones([nlayers, 2 * nwires]), "complex64")
-    print(tc_vag(param))
+    print(tc_vg(param))
 
     time1 = time.time()
     for _ in range(tries):
-        print(tc_vag(param)[0])
+        print(tc_vg(param)[0])
 
     time2 = time.time()
     print(time1 - time0, (time2 - time1) / tries)
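
The profile() change above is a pure rename; the measurement pattern it uses, sketched generically below, times the first call (which pays the jit staging/compilation cost) separately from the steady-state calls:

    import time

    def profile(fn, *args, tries=3):
        time0 = time.time()
        fn(*args)  # first call: tracing/compilation plus one execution
        time1 = time.time()
        for _ in range(tries):
            fn(*args)  # later calls: compiled execution only
        time2 = time.time()
        return time1 - time0, (time2 - time1) / tries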

examples/simple_qaoa.py (+2, -2)

@@ -67,7 +67,7 @@ def QAOAansatz(gamma, beta, g=example_graph):
 
 # 3. get compiled function for QAOA ansatz and its gradient
 
-QAOA_vag = K.jit(K.value_and_grad(QAOAansatz, argnums=(0, 1)), static_argnums=2)
+QAOA_vg = K.jit(K.value_and_grad(QAOAansatz, argnums=(0, 1)), static_argnums=2)
 
 
 # 4. optimization loop
@@ -77,6 +77,6 @@ def QAOAansatz(gamma, beta, g=example_graph):
 opt = K.optimizer(tf.keras.optimizers.Adam(1e-2))
 
 for i in range(100):
-    loss, grads = QAOA_vag(gamma, beta, example_graph)
+    loss, grads = QAOA_vg(gamma, beta, example_graph)
     print(K.numpy(loss))
     gamma, beta = opt.update(grads, [gamma, beta]) # gradient descent
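
Because QAOA_vg is built with argnums=(0, 1), the gradients come back as a tuple aligned with (gamma, beta), which is why opt.update receives the list [gamma, beta]. A hedged sketch with a hypothetical two-parameter ansatz in place of QAOAansatz:

    import tensorflow as tf
    import tensorcircuit as tc

    K = tc.set_backend("tensorflow")

    def ansatz(gamma, beta, n):
        # hypothetical stand-in for QAOAansatz; n plays the role of the static graph arg
        c = tc.Circuit(n)
        for i in range(n):
            c.rx(i, theta=gamma[i])
            c.rz(i, theta=beta[i])
        return K.real(c.expectation((tc.gates.z(), [0])))

    vg = K.jit(K.value_and_grad(ansatz, argnums=(0, 1)), static_argnums=2)
    opt = K.optimizer(tf.keras.optimizers.Adam(1e-2))
    gamma = K.implicit_randn(shape=[4], stddev=0.1)
    beta = K.implicit_randn(shape=[4], stddev=0.1)
    loss, grads = vg(gamma, beta, 4)  # grads == (d_gamma, d_beta)
    gamma, beta = opt.update(grads, [gamma, beta])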

examples/vqe_extra.py (+7, -7)

@@ -96,7 +96,7 @@ def vqe_forward(param, structures):
 time0 = time.time()
 
 batch = 50
-tc_vag = tf.function(
+tc_vg = tf.function(
     tc.backend.vectorized_value_and_grad(vqe_forward, argnums=0, vectorized_argnums=1),
     input_signature=[
         tf.TensorSpec([2 * nlayers, nwires], tf.float32),
@@ -105,23 +105,23 @@ def vqe_forward(param, structures):
 )
 param = tf.Variable(tf.random.normal(stddev=0.1, shape=[2 * nlayers, nwires]))
 
-print(tc_vag(param, structures[:batch]))
+print(tc_vg(param, structures[:batch]))
 
 time1 = time.time()
 print("staging time: ", time1 - time0)
 
 try:
-    keras.save_func(tc_vag, "./funcs/%s_%s_10_tfim" % (nwires, nlayers))
+    keras.save_func(tc_vg, "./funcs/%s_%s_10_tfim" % (nwires, nlayers))
 except ValueError as e:
     print(e) # keras.save_func now has issues to be resolved
 
 
 def train_step(param):
-    vag_list = []
+    vg_list = []
     for i in range(2):
-        vag_list.append(tc_vag(param, structures[i * nwires : i * nwires + nwires]))
-    loss = tc.backend.sum(vag_list[0][0] - vag_list[1][0])
-    gr = vag_list[0][1] - vag_list[1][1]
+        vg_list.append(tc_vg(param, structures[i * nwires : i * nwires + nwires]))
+    loss = tc.backend.sum(vg_list[0][0] - vg_list[1][0])
+    gr = vg_list[0][1] - vg_list[1][1]
     return loss, gr
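tc_vg here wraps vectorized_value_and_grad, which maps the forward pass over axis 0 of the vectorized argument while differentiating with respect to param. A hedged toy sketch (random stand-in data, not the example's TFIM structures):

    import tensorcircuit as tc

    K = tc.set_backend("tensorflow")

    def f(param, structure):
        # hypothetical batched input: structure modulates the rotation angles
        c = tc.Circuit(2)
        c.rx(0, theta=param[0] * structure[0])
        c.rx(1, theta=param[1] * structure[1])
        return K.real(c.expectation((tc.gates.z(), [0])))

    # vectorized over axis 0 of the second argument, differentiated w.r.t. the first
    vvg = K.vectorized_value_and_grad(f, argnums=0, vectorized_argnums=1)
    param = K.implicit_randn(shape=[2], stddev=0.1)
    structures = K.implicit_randn(shape=[8, 2], stddev=1.0)  # batch of 8
    values, grad = vvg(param, structures)  # values is batched; grad matches param's shape, per the diff's usage
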
examples/vqe_extra_mpo.py (+4, -4)

@@ -94,15 +94,15 @@ def vqe_forward(param):
 
 time0 = time.time()
 if refresh:
-    tc_vag = tf.function(
+    tc_vg = tf.function(
         tc.backend.value_and_grad(vqe_forward),
         input_signature=[tf.TensorSpec([4 * nlayers, nwires], tf.float32)],
     )
-    tc.keras.save_func(tc_vag, "./funcs/%s_%s_tfim_mpo" % (nwires, nlayers))
+    tc.keras.save_func(tc_vg, "./funcs/%s_%s_tfim_mpo" % (nwires, nlayers))
 time1 = time.time()
 print("staging time: ", time1 - time0)
 
-tc_vag_loaded = tc.keras.load_func("./funcs/%s_%s_tfim_mpo" % (nwires, nlayers))
+tc_vg_loaded = tc.keras.load_func("./funcs/%s_%s_tfim_mpo" % (nwires, nlayers))
 
 lr1 = 0.008
 lr2 = 0.06
@@ -121,7 +121,7 @@ def vqe_forward(param):
 param = tc.backend.implicit_randn(stddev=0.1, shape=[4 * nlayers, nwires])
 
 for j in range(steps):
-    loss, gr = tc_vag_loaded(param)
+    loss, gr = tc_vg_loaded(param)
     if j < switch:
         param = opt.update(gr, param)
     else:
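
The refresh branch stages the value-and-grad function with a fixed input_signature and serializes it, so later runs can skip staging by calling load_func. A minimal round-trip sketch (vg_demo and the path are hypothetical; assumes tc.keras.save_func/load_func behave as used in this diff):

    import tensorflow as tf
    import tensorcircuit as tc

    tc.set_backend("tensorflow")

    def vg_demo(x):
        # stand-in returning a (loss, grad)-like pair
        return tf.reduce_sum(x ** 2), 2.0 * x

    staged = tf.function(vg_demo, input_signature=[tf.TensorSpec([4], tf.float32)])
    tc.keras.save_func(staged, "./funcs/demo_roundtrip")
    loaded = tc.keras.load_func("./funcs/demo_roundtrip")
    loss, gr = loaded(tf.ones([4], dtype=tf.float32))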

tensorcircuit/interfaces.py (+5, -5)

@@ -219,25 +219,25 @@ def f(param):
     :rtype: Callable[..., Any]
     """
     if gradient:
-        vag = backend.value_and_grad(fun, argnums=0)
+        vg = backend.value_and_grad(fun, argnums=0)
         if jit:
-            vag = backend.jit(vag)
+            vg = backend.jit(vg)
 
-        def scipy_vag(*args: Any, **kws: Any) -> Tuple[Tensor, Tensor]:
+        def scipy_vg(*args: Any, **kws: Any) -> Tuple[Tensor, Tensor]:
             scipy_args = numpy_args_to_backend(args, dtype=dtypestr)
             if shape is not None:
                 scipy_args = list(scipy_args)
                 scipy_args[0] = backend.reshape(scipy_args[0], shape)
                 scipy_args = tuple(scipy_args)
-            vs, gs = vag(*scipy_args, **kws)
+            vs, gs = vg(*scipy_args, **kws)
             scipy_vs = general_args_to_numpy(vs)
             gs = backend.reshape(gs, [-1])
             scipy_gs = general_args_to_numpy(gs)
             scipy_vs = scipy_vs.astype(np.float64)
             scipy_gs = scipy_gs.astype(np.float64)
             return scipy_vs, scipy_gs
 
-        return scipy_vag
+        return scipy_vg
     # no gradient
     if jit:
         fun = backend.jit(fun)
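
scipy_vg is the gradient-carrying closure returned by this file's scipy optimization interface: it converts numpy arguments to backend tensors, reshapes the flat parameter vector, and hands back float64 (value, gradient) pairs that scipy.optimize.minimize can consume with jac=True. A hedged usage sketch; the scipy_optimize_interface name and keywords are read off this hunk's context, so treat the exact signature as an assumption:

    import numpy as np
    from scipy.optimize import minimize
    import tensorcircuit as tc

    K = tc.set_backend("tensorflow")

    def loss(param):
        c = tc.Circuit(2)
        c.rx(0, theta=param[0])
        c.rx(1, theta=param[1])
        return K.real(c.expectation((tc.gates.z(), [0])))

    # the wrapper returns scipy_vg under the hood when gradient=True
    f_scipy = tc.interfaces.scipy_optimize_interface(loss, shape=[2], jit=True)
    res = minimize(f_scipy, np.zeros([2]), jac=True, method="L-BFGS-B")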

tests/test_dmcircuit.py (+6, -6)

@@ -155,8 +155,8 @@ def forward(theta):
         return tc.backend.real(tc.backend.sum(c.densitymatrix()))
 
     theta = tc.num_to_tensor(0.2)
-    vag = tc.backend.value_and_grad(forward)
-    _, g1 = vag(theta)
+    vg = tc.backend.value_and_grad(forward)
+    _, g1 = vg(theta)
     assert np.allclose(tc.backend.numpy(g1), 0.199, atol=1e-2)
 
     def forward2(theta):
@@ -172,8 +172,8 @@ def forward2(theta):
         return tc.backend.real(tc.backend.sum(c.densitymatrix()))
 
     theta = tc.num_to_tensor(0.2)
-    vag2 = tc.backend.value_and_grad(forward2)
-    _, g2 = vag2(theta)
+    vg2 = tc.backend.value_and_grad(forward2)
+    _, g2 = vg2(theta)
     assert np.allclose(tc.backend.numpy(g2), 0.199, atol=1e-2)
 
 
@@ -195,8 +195,8 @@ def forward(p):
         )
 
     theta = tc.num_to_tensor(0.1)
-    vag = tc.backend.value_and_grad(forward)
-    v, g = vag(theta)
+    vg = tc.backend.value_and_grad(forward)
+    v, g = vg(theta)
     assert np.allclose(v, -0.6, atol=1e-2)
     assert np.allclose(g, 4, atol=1e-2)
 
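These tests exercise the same value_and_grad rename on density-matrix circuits; a minimal sketch of that pattern (a hypothetical one-qubit circuit, not the test's):

    import tensorcircuit as tc

    K = tc.set_backend("tensorflow")

    def forward(theta):
        # differentiate through a density-matrix simulation
        c = tc.DMCircuit(1)
        c.rx(0, theta=theta)
        return K.real(K.sum(c.densitymatrix()))

    vg = K.value_and_grad(forward)
    v, g = vg(tc.num_to_tensor(0.2))
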