|
84 | 84 | " n = c._nqubits\n",
|
85 | 85 | " for i in range(n):\n",
|
86 | 86 | " e += lamb * c.expectation((tc.gates.z(), [i])) # <Z_i>\n",
|
87 |
| - " for i in range(n): \n", |
| 87 | + " for i in range(n):\n", |
88 | 88 | " e += c.expectation(\n",
|
89 | 89 | " (tc.gates.x(), [i]), (tc.gates.x(), [(i + 1) % n])\n",
|
90 | 90 | " ) # <X_i X_{i+1}>\n",
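Only the Z-field and X X pieces of the energy function are visible in this hunk. For orientation, below is a minimal self-contained sketch of a complete XXZ energy consistent with those terms; the Y Y and delta-weighted Z Z contributions are assumptions inferred from the "1D XXZ model" comment and the quimb cross-check cell further down, not something shown here.

import tensorcircuit as tc

K = tc.backend

def energy_sketch(c: tc.Circuit, lamb: float = 1.0, delta: float = 1.0):
    # Assumed Hamiltonian: H = sum_i [X_i X_{i+1} + Y_i Y_{i+1} + delta * Z_i Z_{i+1}]
    #                          + lamb * sum_i Z_i   (periodic boundary via % n)
    e = 0.0
    n = c._nqubits
    for i in range(n):
        e += lamb * c.expectation((tc.gates.z(), [i]))  # <Z_i>
    for i in range(n):
        e += c.expectation(
            (tc.gates.x(), [i]), (tc.gates.x(), [(i + 1) % n])
        )  # <X_i X_{i+1}>
        e += c.expectation(
            (tc.gates.y(), [i]), (tc.gates.y(), [(i + 1) % n])
        )  # <Y_i Y_{i+1}>  (assumed term)
        e += delta * c.expectation(
            (tc.gates.z(), [i]), (tc.gates.z(), [(i + 1) % n])
        )  # delta * <Z_i Z_{i+1}>  (assumed term)
    return K.real(e)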
|
|
115 | 115 | "metadata": {},
|
116 | 116 | "outputs": [],
|
117 | 117 | "source": [
|
118 |
| - "def MERA(inp, n, d=1, lamb=1., energy_flag=False): # for single-parameter 1D XXZ model, we fix lamb\n", |
| 118 | + "def MERA(\n", |
| 119 | + " inp, n, d=1, lamb=1.0, energy_flag=False\n", |
| 120 | + "): # for single-parameter 1D XXZ model, we fix lamb\n", |
119 | 121 | " params = K.cast(inp[\"params\"], \"complex128\")\n",
|
120 |
| - " delta = K.cast(inp[\"delta\"], \"complex128\") \n", |
| 122 | + " delta = K.cast(inp[\"delta\"], \"complex128\")\n", |
121 | 123 | " c = tc.Circuit(n)\n",
|
122 |
| - " \n", |
| 124 | + "\n", |
123 | 125 | " idx = 0\n",
|
124 | 126 | "\n",
|
125 | 127 | " for i in range(n):\n",
|
126 | 128 | " c.rx(i, theta=params[3 * i])\n",
|
127 | 129 | " c.rz(i, theta=params[3 * i + 1])\n",
|
128 | 130 | " c.rx(i, theta=params[3 * i + 2])\n",
|
129 | 131 | " idx += 3 * n\n",
|
130 |
| - " \n", |
131 | 132 | "\n",
|
132 | 133 | " for n_layer in range(1, int(np.log2(n)) + 1):\n",
|
133 |
| - " n_qubit = 2**n_layer # number of qubits involving\n", |
| 134 | + "        n_qubit = 2**n_layer  # number of qubits involved at this layer\n", |
134 | 135 | " step = int(n / n_qubit)\n",
|
135 | 136 | "\n",
|
136 |
| - " for _ in range(d): # circuit depth\n", |
137 |
| - " # even \n", |
| 137 | + " for _ in range(d): # circuit depth\n", |
| 138 | + " # even\n", |
138 | 139 | " for i in range(step, n - step, 2 * step):\n",
|
139 | 140 | " c.rxx(i, i + step, theta=params[idx])\n",
|
140 | 141 | " c.rzz(i, i + step, theta=params[idx + 1])\n",
|
141 | 142 | " idx += 2\n",
|
142 |
| - " \n", |
143 |
| - " # odd \n", |
| 143 | + "\n", |
| 144 | + " # odd\n", |
144 | 145 | " for i in range(0, n, 2 * step):\n",
|
145 | 146 | " c.rxx(i, i + step, theta=params[idx])\n",
|
146 | 147 | " c.rzz(i, i + step, theta=params[idx + 1])\n",
|
|
151 | 152 | " c.rx(i, theta=params[idx])\n",
|
152 | 153 | " c.rz(i, theta=params[idx + 1])\n",
|
153 | 154 | " idx += 2\n",
|
154 |
| - " \n", |
| 155 | + "\n", |
155 | 156 | " if energy_flag:\n",
|
156 |
| - " return energy(c, lamb, delta) # return Hamiltonian energy expectation\n", |
| 157 | + " return energy(c, lamb, delta) # return Hamiltonian energy expectation\n", |
157 | 158 | " else:\n",
|
158 |
| - " return c, idx # return the circuit & number of circuit parameters" |
| 159 | + " return c, idx # return the circuit & number of circuit parameters" |
159 | 160 | ]
|
160 | 161 | },
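For the n = 8 circuit visualized further down, the n_layer loop above walks a coarse-to-fine schedule. This standalone sketch reuses only the loop bounds shown in the hunk to print which qubit pairs each layer entangles:

import numpy as np

n = 8
for n_layer in range(1, int(np.log2(n)) + 1):
    n_qubit = 2**n_layer  # qubits involved at this layer
    step = n // n_qubit
    even = list(range(step, n - step, 2 * step))  # starts of even-sublattice pairs
    odd = list(range(0, n, 2 * step))  # starts of odd-sublattice pairs
    print(f"layer {n_layer}: step={step}, even starts {even}, odd starts {odd}")
# layer 1: step=4, even starts [], odd starts [0]
# layer 2: step=2, even starts [2], odd starts [0, 4]
# layer 3: step=1, even starts [1, 3, 5], odd starts [0, 2, 4, 6]

Each printed start index i labels one rxx/rzz pair acting on (i, i + step), consuming two parameters per depth repetition d.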
|
161 | 162 | {
|
|
2895 | 2896 | "# circuit visualization\n",
|
2896 | 2897 | "n = 8\n",
|
2897 | 2898 | "d = 1\n",
|
2898 |
| - "cirq, idx = MERA({\"params\": np.zeros(3000), \"delta\": 0.}, n, d, 1.)\n", |
| 2899 | + "cirq, idx = MERA({\"params\": np.zeros(3000), \"delta\": 0.0}, n, d, 1.0)\n", |
2899 | 2900 | "print(\"The number of parameters is\", idx)\n",
|
2900 | 2901 | "cirq.draw()"
|
2901 | 2902 | ]
|
|
2919 | 2920 | "outputs": [],
|
2920 | 2921 | "source": [
|
2921 | 2922 | "def NN_MERA(n, d, lamb, NN_shape, stddev):\n",
|
2922 |
| - " input = tf.keras.layers.Input(shape=[1]) # input layer\n", |
| 2923 | + " input = tf.keras.layers.Input(shape=[1]) # input layer\n", |
2923 | 2924 | "\n",
|
2924 | 2925 | " x = tf.keras.layers.Dense(\n",
|
2925 |
| - " units=NN_shape, \n", |
| 2926 | + " units=NN_shape,\n", |
2926 | 2927 | " kernel_initializer=tf.keras.initializers.RandomNormal(stddev=stddev),\n",
|
2927 |
| - " activation=\"ReLU\"\n", |
2928 |
| - " )(input) # hidden layer\n", |
| 2928 | + "        activation=\"relu\",\n", |
| 2929 | + " )(\n", |
| 2930 | + " input\n", |
| 2931 | + " ) # hidden layer\n", |
2929 | 2932 | "\n",
|
2930 |
| - " x = tf.keras.layers.Dropout(0.05)(x) # dropout layer\n", |
| 2933 | + " x = tf.keras.layers.Dropout(0.05)(x) # dropout layer\n", |
2931 | 2934 | "\n",
|
2932 |
| - " _, idx = MERA({\"params\": np.zeros(3000), \"delta\": 0.}, n, d, 1., energy_flag=False)\n", |
| 2935 | + " _, idx = MERA(\n", |
| 2936 | + " {\"params\": np.zeros(3000), \"delta\": 0.0}, n, d, 1.0, energy_flag=False\n", |
| 2937 | + " )\n", |
2933 | 2938 | " params = tf.keras.layers.Dense(\n",
|
2934 |
| - " units=idx, \n", |
2935 |
| - " kernel_initializer=tf.keras.initializers.RandomNormal(stddev=stddev),\n", |
2936 |
| - " activation=\"sigmoid\"\n", |
2937 |
| - " )(x) # output layer\n", |
2938 |
| - " \n", |
2939 |
| - " qlayer = tc.KerasLayer(partial(MERA, n=n, d=d, lamb=lamb, energy_flag=True)) # PQC\n", |
| 2939 | + " units=idx,\n", |
| 2940 | + " kernel_initializer=tf.keras.initializers.RandomNormal(stddev=stddev),\n", |
| 2941 | + " activation=\"sigmoid\",\n", |
| 2942 | + " )(\n", |
| 2943 | + " x\n", |
| 2944 | + " ) # output layer\n", |
| 2945 | + "\n", |
| 2946 | + " qlayer = tc.KerasLayer(partial(MERA, n=n, d=d, lamb=lamb, energy_flag=True)) # PQC\n", |
2940 | 2947 | "\n",
|
2941 |
| - " output = qlayer({\"params\": 6.3 * params, \"delta\": input}) # NN-VQE output\n", |
| 2948 | + " output = qlayer({\"params\": 6.3 * params, \"delta\": input}) # NN-VQE output\n", |
2942 | 2949 | "\n",
|
2943 | 2950 | " m = tf.keras.Model(inputs=input, outputs=output)\n",
|
2944 | 2951 | "\n",
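The 6.3 scaling the sigmoid outputs sits just above 2*pi, so each NN-generated angle can cover a full rotation; that reading is an inference, not stated in the code. Assuming the elided tail of NN_MERA returns the Keras model m, a usage sketch:

# scalar delta -> Dense(NN_shape, relu) -> Dropout(0.05) -> Dense(idx, sigmoid) -> PQC energy
m = NN_MERA(n=8, d=2, lamb=0.75, NN_shape=20, stddev=0.1)
m.summary()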
|
|
2965 | 2972 | },
|
2966 | 2973 | "outputs": [],
|
2967 | 2974 | "source": [
|
2968 |
| - "def train(n, d, lamb, delta, NN_shape, maxiter=10000, lr=0.005, stddev=1.):\n", |
| 2975 | + "def train(n, d, lamb, delta, NN_shape, maxiter=10000, lr=0.005, stddev=1.0):\n", |
2969 | 2976 | " exp_lr = tf.keras.optimizers.schedules.ExponentialDecay(\n",
|
2970 |
| - " initial_learning_rate=lr, decay_steps=1000, decay_rate=0.7\n", |
2971 |
| - " )\n", |
2972 |
| - " opt = tf.keras.optimizers.Adam(exp_lr) # optimizer\n", |
| 2977 | + " initial_learning_rate=lr, decay_steps=1000, decay_rate=0.7\n", |
| 2978 | + " )\n", |
| 2979 | + " opt = tf.keras.optimizers.Adam(exp_lr) # optimizer\n", |
2973 | 2980 | "\n",
|
2974 | 2981 | " m = NN_MERA(n, d, lamb, NN_shape, stddev)\n",
|
2975 | 2982 | " for i in range(maxiter):\n",
|
2976 | 2983 | " with tf.GradientTape() as tape:\n",
|
2977 | 2984 | " e = tf.zeros([1], dtype=tf.float64)\n",
|
2978 | 2985 | " for de in delta:\n",
|
2979 |
| - " e += m(K.reshape(de, [1])) # sum up energies of all training points\n", |
| 2986 | + " e += m(K.reshape(de, [1])) # sum up energies of all training points\n", |
2980 | 2987 | " grads = tape.gradient(e, m.variables)\n",
|
2981 | 2988 | " opt.apply_gradients(zip(grads, m.variables))\n",
|
2982 | 2989 | " if i % 500 == 0:\n",
|
2983 | 2990 | " print(\"epoch\", i, \":\", e)\n",
|
2984 |
| - " \n", |
2985 |
| - " m.save_weights('NN-VQE.weights.h5') # save the trained model" |
| 2991 | + "\n", |
| 2992 | + " m.save_weights(\"NN-VQE.weights.h5\") # save the trained model" |
2986 | 2993 | ]
|
2987 | 2994 | },
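train() is full-batch: each step sums the model energy over all training deltas and backpropagates through both the dense layers and, via tc.KerasLayer, the circuit parameters. With the run cell's lr = 0.009, the non-staircase exponential schedule decays as lr * 0.7**(step / 1000); a quick check of two milestones:

import tensorflow as tf

exp_lr = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=0.009, decay_steps=1000, decay_rate=0.7
)
print(float(exp_lr(1000)))  # 0.009 * 0.7**1.0 ~ 0.0063
print(float(exp_lr(2500)))  # 0.009 * 0.7**2.5 ~ 0.0037, the rate at the final step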
|
2988 | 2995 | {
|
|
3006 | 3013 | }
|
3007 | 3014 | ],
|
3008 | 3015 | "source": [
|
3009 |
| - "n = 8 # number of qubits\n", |
3010 |
| - "d = 2 # circuit depth\n", |
3011 |
| - "lamb = 0.75 # fixed\n", |
3012 |
| - "delta = np.linspace(-3.0, 3.0, 20, dtype=\"complex128\") # training set\n", |
3013 |
| - "NN_shape = 20 # node number of the hidden layer\n", |
3014 |
| - "maxiter = 2500 # maximum iteration for the optimization\n", |
3015 |
| - "lr = 0.009 # learning rate\n", |
3016 |
| - "stddev = 0.1 # the initial standard deviation of the NN\n", |
| 3016 | + "n = 8 # number of qubits\n", |
| 3017 | + "d = 2 # circuit depth\n", |
| 3018 | + "lamb = 0.75 # fixed\n", |
| 3019 | + "delta = np.linspace(-3.0, 3.0, 20, dtype=\"complex128\") # training set\n", |
| 3020 | + "NN_shape = 20  # number of nodes in the hidden layer\n", |
| 3021 | + "maxiter = 2500  # maximum number of optimization iterations\n", |
| 3022 | + "lr = 0.009 # learning rate\n", |
| 3023 | + "stddev = 0.1  # stddev of the NN weight initializers\n", |
3017 | 3024 | "\n",
|
3018 |
| - "with tf.device('/cpu:0'):\n", |
| 3025 | + "with tf.device(\"/cpu:0\"):\n", |
3019 | 3026 | " train(n, d, lamb, delta, NN_shape=NN_shape, maxiter=maxiter, lr=lr, stddev=stddev)"
|
3020 | 3027 | ]
|
3021 | 3028 | },
|
|
3052 | 3059 | }
|
3053 | 3060 | ],
|
3054 | 3061 | "source": [
|
3055 |
| - "test_delta = np.linspace(-4.0, 4.0, 201) # test set\n", |
| 3062 | + "test_delta = np.linspace(-4.0, 4.0, 201) # test set\n", |
3056 | 3063 | "test_energies = tf.zeros_like(test_delta).numpy()\n",
|
3057 | 3064 | "m = NN_MERA(n, d, lamb, NN_shape, stddev)\n",
|
3058 |
| - "m.load_weights('DNN-MERA_2[20](-3.0,3.0,20)_drop05.weights.h5')\n", |
| 3065 | + "m.load_weights(\"DNN-MERA_2[20](-3.0,3.0,20)_drop05.weights.h5\")\n", |
3059 | 3066 | "for i, de in tqdm(enumerate(test_delta)):\n",
|
3060 | 3067 | " test_energies[i] = m(K.reshape(de, [1]))"
|
3061 | 3068 | ]
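Note that the checkpoint loaded here, DNN-MERA_2[20](-3.0,3.0,20)_drop05.weights.h5, is not the NN-VQE.weights.h5 that train() saves; it is presumably a pre-trained file shipped with the tutorial. To evaluate weights you trained yourself, point load_weights at the saved name instead:

m.load_weights("NN-VQE.weights.h5")  # the file written by train() above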
|
|
3077 | 3084 | "metadata": {},
|
3078 | 3085 | "outputs": [],
|
3079 | 3086 | "source": [
|
3080 |
| - "analytical_energies = [] # analytical result\n", |
| 3087 | + "analytical_energies = [] # analytical result\n", |
3081 | 3088 | "for i in test_delta:\n",
|
3082 |
| - " h = quimb.tensor.tensor_builder.MPO_ham_XXZ(n, i*4, jxy=4., bz=2.*0.75, S=0.5, cyclic=True) \n", |
| 3089 | + " h = quimb.tensor.tensor_builder.MPO_ham_XXZ(\n", |
| 3090 | + " n, i * 4, jxy=4.0, bz=2.0 * 0.75, S=0.5, cyclic=True\n", |
| 3091 | + " )\n", |
3083 | 3092 | " h = h.to_dense()\n",
|
3084 | 3093 | " analytical_energies.append(np.min(quimb.eigvalsh(h)))"
|
3085 | 3094 | ]
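The factors of 4 and 2 in the MPO_ham_XXZ call convert Pauli couplings to quimb's spin-1/2 operators S^a = sigma^a / 2:

4 S^x_i S^x_{i+1} = (2 S^x_i)(2 S^x_{i+1}) = X_i X_{i+1},    2 S^z_i = Z_i

so jxy=4.0 puts coefficient 1 on the X X + Y Y terms, the first positional argument i * 4 puts delta on Z Z, and bz=2.0 * 0.75 matches lamb = 0.75 on the Z field (up to quimb's sign convention for the field term, which is not verified here).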
|
|
3956 | 3965 | ],
|
3957 | 3966 | "source": [
|
3958 | 3967 | "# relative error\n",
|
3959 |
| - "plt.plot(test_delta, (test_energies - analytical_energies) / np.abs(analytical_energies), '-', color='b')\n", |
| 3968 | + "plt.plot(\n", |
| 3969 | + " test_delta,\n", |
| 3970 | + " (test_energies - analytical_energies) / np.abs(analytical_energies),\n", |
| 3971 | + " \"-\",\n", |
| 3972 | + " color=\"b\",\n", |
| 3973 | + ")\n", |
3960 | 3974 | "plt.xlabel(\"Delta\", fontsize=14)\n",
|
3961 | 3975 | "plt.ylabel(\"GS Relative Error\", fontsize=14)\n",
|
3962 |
| - "plt.axvspan(-3.0, 3.0, color='darkgrey', alpha=0.5) # training set span\n", |
| 3976 | + "plt.axvspan(-3.0, 3.0, color=\"darkgrey\", alpha=0.5) # training set span\n", |
3963 | 3977 | "plt.show()"
|
3964 | 3978 | ]
|
3965 | 3979 | },
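The plotted quantity is the ground-state relative error

rel_err(Delta) = (E_NN(Delta) - E_exact(Delta)) / |E_exact(Delta)|

and the grey axvspan marks the training window Delta in [-3, 3]; since the test sweep extends to +/-4, the unshaded margins show how well the NN-VQE extrapolates beyond the deltas it was trained on.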
|
|