-
Notifications
You must be signed in to change notification settings - Fork 81
/
Copy pathsimple_qaoa.py
83 lines (60 loc) · 2.15 KB
/
simple_qaoa.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
"""
A plain QAOA optimization example with given graphs using networkx.
"""
import sys
sys.path.insert(0, "../")
import networkx as nx
import tensorflow as tf
import tensorcircuit as tc
K = tc.set_backend("tensorflow")
## 1. define the graph
def dict2graph(d):
    """Convert an adjacency dict into a networkx graph with weighted edges.

    Any edge whose "weight" attribute is missing or falsy (e.g. None or 0)
    is assigned a default weight of 1.0.
    """
    graph = nx.to_networkx_graph(d)
    for u, v in graph.edges:
        attrs = graph[u][v]
        if not attrs.get("weight"):
            attrs["weight"] = 1.0
    return graph
# a graph instance
# 8-node graph given as a nested adjacency dict; every node lists exactly
# three neighbors (degree 3) and all edges carry weight 1.0.
example_graph_dict = {
    0: {1: {"weight": 1.0}, 7: {"weight": 1.0}, 3: {"weight": 1.0}},
    1: {0: {"weight": 1.0}, 2: {"weight": 1.0}, 3: {"weight": 1.0}},
    2: {1: {"weight": 1.0}, 3: {"weight": 1.0}, 5: {"weight": 1.0}},
    4: {7: {"weight": 1.0}, 6: {"weight": 1.0}, 5: {"weight": 1.0}},
    7: {4: {"weight": 1.0}, 6: {"weight": 1.0}, 0: {"weight": 1.0}},
    3: {1: {"weight": 1.0}, 2: {"weight": 1.0}, 0: {"weight": 1.0}},
    6: {7: {"weight": 1.0}, 4: {"weight": 1.0}, 5: {"weight": 1.0}},
    5: {6: {"weight": 1.0}, 4: {"weight": 1.0}, 2: {"weight": 1.0}},
}
example_graph = dict2graph(example_graph_dict)
# 2. define the quantum ansatz
# Number of QAOA layers p; each layer consumes one gamma (cost) angle
# and one beta (mixer) angle.
nlayers = 3
def QAOAansatz(gamma, beta, g=example_graph):
    """Build the QAOA circuit for MaxCut on ``g`` and return its cost expectation.

    :param gamma: cost-layer angles, one per QAOA layer (length nlayers)
    :param beta: mixer-layer angles, one per QAOA layer (length nlayers)
    :param g: weighted networkx graph defining the MaxCut instance
    :return: real part of sum over edges (u, v) of <Z_u Z_v>
    """
    num_qubits = len(g.nodes)
    circuit = tc.Circuit(num_qubits)
    # Prepare the uniform superposition over all bitstrings.
    for qubit in range(num_qubits):
        circuit.H(qubit)
    # Alternate cost (weighted ZZ) and mixer (RX) layers nlayers times.
    for layer in range(nlayers):
        for u, v in g.edges:
            weight = g[u][v].get("weight", 1.0)
            circuit.exp1(
                u,
                v,
                unitary=tc.gates._zz_matrix,
                theta=weight * gamma[layer],
            )
        for qubit in range(num_qubits):
            circuit.rx(qubit, theta=beta[layer])
    # calculate the loss function, max cut
    # Sum the ZZ correlators over all edges; minimizing this maximizes the cut.
    energy = 0.0
    for u, v in g.edges:
        energy += circuit.expectation_ps(z=[u, v])
    return K.real(energy)
# 3. get compiled function for QAOA ansatz and its gradient
# value_and_grad differentiates w.r.t. positional args 0 and 1 (gamma, beta);
# the graph argument (position 2) is marked static for jit, so the function
# is retraced only when a different graph object is passed.
QAOA_vg = K.jit(K.value_and_grad(QAOAansatz, argnums=(0, 1)), static_argnums=2)
# 4. optimization loop
# Small random initial angles, one per QAOA layer.
beta = K.implicit_randn(shape=[nlayers], stddev=0.1)
gamma = K.implicit_randn(shape=[nlayers], stddev=0.1)
# Wrap a Keras Adam optimizer (lr=1e-2) behind the backend-agnostic interface.
opt = K.optimizer(tf.keras.optimizers.Adam(1e-2))
for i in range(100):
    # grads is ordered as argnums: (d_loss/d_gamma, d_loss/d_beta).
    loss, grads = QAOA_vg(gamma, beta, example_graph)
    print(K.numpy(loss))
    gamma, beta = opt.update(grads, [gamma, beta])  # gradient descent