Skip to content

Commit 607fe0e

Browse files
authored Aug 8, 2023
Add files via upload
1 parent 46a16fa commit 607fe0e

File tree

1 file changed

+63
-49
lines changed

1 file changed

+63
-49
lines changed
 

‎docs/source/tutorials/nnvqe.ipynb

+63-49
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@
8484
" n = c._nqubits\n",
8585
" for i in range(n):\n",
8686
" e += lamb * c.expectation((tc.gates.z(), [i])) # <Z_i>\n",
87-
" for i in range(n): \n",
87+
" for i in range(n):\n",
8888
" e += c.expectation(\n",
8989
" (tc.gates.x(), [i]), (tc.gates.x(), [(i + 1) % n])\n",
9090
" ) # <X_i X_{i+1}>\n",
@@ -115,32 +115,33 @@
115115
"metadata": {},
116116
"outputs": [],
117117
"source": [
118-
"def MERA(inp, n, d=1, lamb=1., energy_flag=False): # for single-parameter 1D XXZ model, we fix lamb\n",
118+
"def MERA(\n",
119+
" inp, n, d=1, lamb=1.0, energy_flag=False\n",
120+
"): # for single-parameter 1D XXZ model, we fix lamb\n",
119121
" params = K.cast(inp[\"params\"], \"complex128\")\n",
120-
" delta = K.cast(inp[\"delta\"], \"complex128\") \n",
122+
" delta = K.cast(inp[\"delta\"], \"complex128\")\n",
121123
" c = tc.Circuit(n)\n",
122-
" \n",
124+
"\n",
123125
" idx = 0\n",
124126
"\n",
125127
" for i in range(n):\n",
126128
" c.rx(i, theta=params[3 * i])\n",
127129
" c.rz(i, theta=params[3 * i + 1])\n",
128130
" c.rx(i, theta=params[3 * i + 2])\n",
129131
" idx += 3 * n\n",
130-
" \n",
131132
"\n",
132133
" for n_layer in range(1, int(np.log2(n)) + 1):\n",
133-
" n_qubit = 2**n_layer # number of qubits involving\n",
134+
"        n_qubit = 2**n_layer  # number of qubits involved\n",
134135
" step = int(n / n_qubit)\n",
135136
"\n",
136-
" for _ in range(d): # circuit depth\n",
137-
" # even \n",
137+
" for _ in range(d): # circuit depth\n",
138+
" # even\n",
138139
" for i in range(step, n - step, 2 * step):\n",
139140
" c.rxx(i, i + step, theta=params[idx])\n",
140141
" c.rzz(i, i + step, theta=params[idx + 1])\n",
141142
" idx += 2\n",
142-
" \n",
143-
" # odd \n",
143+
"\n",
144+
" # odd\n",
144145
" for i in range(0, n, 2 * step):\n",
145146
" c.rxx(i, i + step, theta=params[idx])\n",
146147
" c.rzz(i, i + step, theta=params[idx + 1])\n",
@@ -151,11 +152,11 @@
151152
" c.rx(i, theta=params[idx])\n",
152153
" c.rz(i, theta=params[idx + 1])\n",
153154
" idx += 2\n",
154-
" \n",
155+
"\n",
155156
" if energy_flag:\n",
156-
" return energy(c, lamb, delta) # return Hamiltonian energy expectation\n",
157+
" return energy(c, lamb, delta) # return Hamiltonian energy expectation\n",
157158
" else:\n",
158-
" return c, idx # return the circuit & number of circuit parameters"
159+
" return c, idx # return the circuit & number of circuit parameters"
159160
]
160161
},
161162
{
@@ -2895,7 +2896,7 @@
28952896
"# circuit visualization\n",
28962897
"n = 8\n",
28972898
"d = 1\n",
2898-
"cirq, idx = MERA({\"params\": np.zeros(3000), \"delta\": 0.}, n, d, 1.)\n",
2899+
"cirq, idx = MERA({\"params\": np.zeros(3000), \"delta\": 0.0}, n, d, 1.0)\n",
28992900
"print(\"The number of parameters is\", idx)\n",
29002901
"cirq.draw()"
29012902
]
@@ -2919,26 +2920,32 @@
29192920
"outputs": [],
29202921
"source": [
29212922
"def NN_MERA(n, d, lamb, NN_shape, stddev):\n",
2922-
" input = tf.keras.layers.Input(shape=[1]) # input layer\n",
2923+
" input = tf.keras.layers.Input(shape=[1]) # input layer\n",
29232924
"\n",
29242925
" x = tf.keras.layers.Dense(\n",
2925-
" units=NN_shape, \n",
2926+
" units=NN_shape,\n",
29262927
" kernel_initializer=tf.keras.initializers.RandomNormal(stddev=stddev),\n",
2927-
" activation=\"ReLU\"\n",
2928-
" )(input) # hidden layer\n",
2928+
" activation=\"ReLU\",\n",
2929+
" )(\n",
2930+
" input\n",
2931+
" ) # hidden layer\n",
29292932
"\n",
2930-
" x = tf.keras.layers.Dropout(0.05)(x) # dropout layer\n",
2933+
" x = tf.keras.layers.Dropout(0.05)(x) # dropout layer\n",
29312934
"\n",
2932-
" _, idx = MERA({\"params\": np.zeros(3000), \"delta\": 0.}, n, d, 1., energy_flag=False)\n",
2935+
" _, idx = MERA(\n",
2936+
" {\"params\": np.zeros(3000), \"delta\": 0.0}, n, d, 1.0, energy_flag=False\n",
2937+
" )\n",
29332938
" params = tf.keras.layers.Dense(\n",
2934-
" units=idx, \n",
2935-
" kernel_initializer=tf.keras.initializers.RandomNormal(stddev=stddev),\n",
2936-
" activation=\"sigmoid\"\n",
2937-
" )(x) # output layer\n",
2938-
" \n",
2939-
" qlayer = tc.KerasLayer(partial(MERA, n=n, d=d, lamb=lamb, energy_flag=True)) # PQC\n",
2939+
" units=idx,\n",
2940+
" kernel_initializer=tf.keras.initializers.RandomNormal(stddev=stddev),\n",
2941+
" activation=\"sigmoid\",\n",
2942+
" )(\n",
2943+
" x\n",
2944+
" ) # output layer\n",
2945+
"\n",
2946+
" qlayer = tc.KerasLayer(partial(MERA, n=n, d=d, lamb=lamb, energy_flag=True)) # PQC\n",
29402947
"\n",
2941-
" output = qlayer({\"params\": 6.3 * params, \"delta\": input}) # NN-VQE output\n",
2948+
" output = qlayer({\"params\": 6.3 * params, \"delta\": input}) # NN-VQE output\n",
29422949
"\n",
29432950
" m = tf.keras.Model(inputs=input, outputs=output)\n",
29442951
"\n",
@@ -2965,24 +2972,24 @@
29652972
},
29662973
"outputs": [],
29672974
"source": [
2968-
"def train(n, d, lamb, delta, NN_shape, maxiter=10000, lr=0.005, stddev=1.):\n",
2975+
"def train(n, d, lamb, delta, NN_shape, maxiter=10000, lr=0.005, stddev=1.0):\n",
29692976
" exp_lr = tf.keras.optimizers.schedules.ExponentialDecay(\n",
2970-
" initial_learning_rate=lr, decay_steps=1000, decay_rate=0.7\n",
2971-
" )\n",
2972-
" opt = tf.keras.optimizers.Adam(exp_lr) # optimizer\n",
2977+
" initial_learning_rate=lr, decay_steps=1000, decay_rate=0.7\n",
2978+
" )\n",
2979+
" opt = tf.keras.optimizers.Adam(exp_lr) # optimizer\n",
29732980
"\n",
29742981
" m = NN_MERA(n, d, lamb, NN_shape, stddev)\n",
29752982
" for i in range(maxiter):\n",
29762983
" with tf.GradientTape() as tape:\n",
29772984
" e = tf.zeros([1], dtype=tf.float64)\n",
29782985
" for de in delta:\n",
2979-
" e += m(K.reshape(de, [1])) # sum up energies of all training points\n",
2986+
" e += m(K.reshape(de, [1])) # sum up energies of all training points\n",
29802987
" grads = tape.gradient(e, m.variables)\n",
29812988
" opt.apply_gradients(zip(grads, m.variables))\n",
29822989
" if i % 500 == 0:\n",
29832990
" print(\"epoch\", i, \":\", e)\n",
2984-
" \n",
2985-
" m.save_weights('NN-VQE.weights.h5') # save the trained model"
2991+
"\n",
2992+
" m.save_weights(\"NN-VQE.weights.h5\") # save the trained model"
29862993
]
29872994
},
29882995
{
@@ -3006,16 +3013,16 @@
30063013
}
30073014
],
30083015
"source": [
3009-
"n = 8 # number of qubits\n",
3010-
"d = 2 # circuit depth\n",
3011-
"lamb = 0.75 # fixed\n",
3012-
"delta = np.linspace(-3.0, 3.0, 20, dtype=\"complex128\") # training set\n",
3013-
"NN_shape = 20 # node number of the hidden layer\n",
3014-
"maxiter = 2500 # maximum iteration for the optimization\n",
3015-
"lr = 0.009 # learning rate\n",
3016-
"stddev = 0.1 # the initial standard deviation of the NN\n",
3016+
"n = 8 # number of qubits\n",
3017+
"d = 2 # circuit depth\n",
3018+
"lamb = 0.75 # fixed\n",
3019+
"delta = np.linspace(-3.0, 3.0, 20, dtype=\"complex128\") # training set\n",
3020+
"NN_shape = 20 # node number of the hidden layer\n",
3021+
"maxiter = 2500 # maximum iteration for the optimization\n",
3022+
"lr = 0.009 # learning rate\n",
3023+
"stddev = 0.1 # the initial standard deviation of the NN\n",
30173024
"\n",
3018-
"with tf.device('/cpu:0'):\n",
3025+
"with tf.device(\"/cpu:0\"):\n",
30193026
" train(n, d, lamb, delta, NN_shape=NN_shape, maxiter=maxiter, lr=lr, stddev=stddev)"
30203027
]
30213028
},
@@ -3052,10 +3059,10 @@
30523059
}
30533060
],
30543061
"source": [
3055-
"test_delta = np.linspace(-4.0, 4.0, 201) # test set\n",
3062+
"test_delta = np.linspace(-4.0, 4.0, 201) # test set\n",
30563063
"test_energies = tf.zeros_like(test_delta).numpy()\n",
30573064
"m = NN_MERA(n, d, lamb, NN_shape, stddev)\n",
3058-
"m.load_weights('DNN-MERA_2[20](-3.0,3.0,20)_drop05.weights.h5')\n",
3065+
"m.load_weights(\"DNN-MERA_2[20](-3.0,3.0,20)_drop05.weights.h5\")\n",
30593066
"for i, de in tqdm(enumerate(test_delta)):\n",
30603067
" test_energies[i] = m(K.reshape(de, [1]))"
30613068
]
@@ -3077,9 +3084,11 @@
30773084
"metadata": {},
30783085
"outputs": [],
30793086
"source": [
3080-
"analytical_energies = [] # analytical result\n",
3087+
"analytical_energies = [] # analytical result\n",
30813088
"for i in test_delta:\n",
3082-
" h = quimb.tensor.tensor_builder.MPO_ham_XXZ(n, i*4, jxy=4., bz=2.*0.75, S=0.5, cyclic=True) \n",
3089+
" h = quimb.tensor.tensor_builder.MPO_ham_XXZ(\n",
3090+
" n, i * 4, jxy=4.0, bz=2.0 * 0.75, S=0.5, cyclic=True\n",
3091+
" )\n",
30833092
" h = h.to_dense()\n",
30843093
" analytical_energies.append(np.min(quimb.eigvalsh(h)))"
30853094
]
@@ -3956,10 +3965,15 @@
39563965
],
39573966
"source": [
39583967
"# relative error\n",
3959-
"plt.plot(test_delta, (test_energies - analytical_energies) / np.abs(analytical_energies), '-', color='b')\n",
3968+
"plt.plot(\n",
3969+
" test_delta,\n",
3970+
" (test_energies - analytical_energies) / np.abs(analytical_energies),\n",
3971+
" \"-\",\n",
3972+
" color=\"b\",\n",
3973+
")\n",
39603974
"plt.xlabel(\"Delta\", fontsize=14)\n",
39613975
"plt.ylabel(\"GS Relative Error\", fontsize=14)\n",
3962-
"plt.axvspan(-3.0, 3.0, color='darkgrey', alpha=0.5) # training set span\n",
3976+
"plt.axvspan(-3.0, 3.0, color=\"darkgrey\", alpha=0.5) # training set span\n",
39633977
"plt.show()"
39643978
]
39653979
},

0 commit comments

Comments (0)
Please sign in to comment.