
Commit d1c0fb3

Committed Feb 3, 2021
Bring back tf.constant (close #38), custom log path, fix Quantity shape
1 parent ebb13e4 · commit d1c0fb3

16 files changed: +96 -95 lines
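Note on the main change: fixed tensors (ideal gates, Hamiltonian terms, time grids) are now wrapped in tf.constant instead of tf.Variable, so TensorFlow no longer allocates trainable, mutable state for values that are never optimized. A minimal sketch of the distinction in plain TensorFlow (illustration only, not code from this commit):

import tensorflow as tf

# Fixed data that is never optimized: a constant tensor is enough.
dt = tf.constant(1e-9, dtype=tf.complex128)

# Trainable state: a Variable is watched by tf.GradientTape by default.
theta = tf.Variable(0.5, dtype=tf.float64)

with tf.GradientTape() as tape:
    loss = theta ** 2
print(tape.gradient(loss, theta))  # 1.0 - Variables are tracked automatically

with tf.GradientTape() as tape:
    loss = tf.abs(dt) ** 2
print(tape.gradient(loss, dt))  # None - constants are not watched unless asked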
 

‎c3/c3objs.py

+9 -5

@@ -4,6 +4,7 @@
 import numpy as np
 import tensorflow as tf
 from c3.utils.utils import num3str
+from tensorflow.python.framework import ops
 
 
 class C3obj:
@@ -71,6 +72,7 @@ class Quantity:
     def __init__(
         self, value, min_val=None, max_val=None, unit="undefined", symbol=r"\alpha"
     ):
+        value = np.array(value)
         if "pi" in unit:
             pref = np.pi
         if "2pi" in unit:
@@ -89,7 +91,6 @@ def __init__(
         if hasattr(value, "shape"):
             self.shape = value.shape
             self.length = int(np.prod(value.shape))
-            self.__float__ = None
         else:
             self.shape = (1,)
             self.length = 1
@@ -102,9 +103,9 @@ def asdict(self) -> dict:
         """
         pref = self.pref
         return {
-            "value": self.numpy(),
-            "min_val": self.offset / pref,
-            "max_val": (self.scale / pref + self.offset / pref),
+            "value": self.numpy().tolist(),
+            "min_val": (self.offset / pref).tolist(),
+            "max_val": (self.scale / pref + self.offset / pref).tolist(),
             "unit": self.unit,
             "symbol": self.symbol,
         }
@@ -202,7 +203,10 @@ def set_value(self, val) -> None:
             )
             # TODO if we want we can extend bounds when force flag is given
         else:
-            self.value = tf.Variable(tmp, dtype=tf.float64)
+            if isinstance(val, ops.EagerTensor):
+                self.value = tf.cast(2 * (val * self.pref - self.offset) / self.scale - 1, tf.float64)
+            else:
+                self.value = tf.constant(tmp, dtype=tf.float64)
 
     def get_opt_value(self) -> np.ndarray:
         """ Get an optimizer friendly representation of the value."""

‎c3/experiment.py

+4 -3

@@ -48,6 +48,7 @@ def __init__(self, pmap: ParameterMap = None):
         self.unitaries: dict = {}
         self.dUs: dict = {}
         self.created_by = None
+        self.logdir: str = None
 
     def set_created_by(self, config):
         """
@@ -278,7 +279,7 @@ def get_gates(self):
                 ctrls["carrier"].params["framechange"].get_value(),
                 tf.complex128,
             )
-            t_final = tf.Variable(instr.t_end - instr.t_start, dtype=tf.complex128)
+            t_final = tf.constant(instr.t_end - instr.t_start, dtype=tf.complex128)
             FR = model.get_Frame_Rotation(t_final, freqs, framechanges)
             if model.lindbladian:
                 SFR = tf_utils.tf_super(FR)
@@ -295,7 +296,7 @@ def get_gates(self):
             for line, ctrls in instr.comps.items():
                 amp, sum = generator.devices["awg"].get_average_amp()
                 amps[line] = tf.cast(amp, tf.complex128)
-                t_final = tf.Variable(
+                t_final = tf.constant(
                     instr.t_end - instr.t_start, dtype=tf.complex128
                 )
                 dephasing_channel = model.get_dephasing_channel(t_final, amps)
@@ -331,7 +332,7 @@ def propagation(self, signal: dict, gate):
             signals.append(signal[key]["values"])
             ts = signal[key]["ts"]
             hks.append(hctrls[key])
-            dt = tf.Variable(ts[1].numpy() - ts[0].numpy(), dtype=tf.complex128)
+            dt = tf.constant(ts[1].numpy() - ts[0].numpy(), dtype=tf.complex128)
 
         if model.lindbladian:
             col_ops = model.get_Lindbladians()

‎c3/generator/devices.py

+3 -3

@@ -100,8 +100,8 @@ def create_ts(self, t_start: np.float64, t_end: np.float64, centered: bool = Tru
         else:
             offset = 0
             num = self.slice_num + 1
-        t_start = tf.Variable(t_start + offset, dtype=tf.float64)
-        t_end = tf.Variable(t_end - offset, dtype=tf.float64)
+        t_start = tf.constant(t_start + offset, dtype=tf.float64)
+        t_end = tf.constant(t_end - offset, dtype=tf.float64)
         ts = tf.linspace(t_start, t_end, num)
         return ts
 
@@ -451,7 +451,7 @@ def process(self, instr, chan, iq_signal):
         """
         n_ts = tf.floor(self.params["rise_time"].get_value() * self.resolution)
         ts = tf.linspace(
-            tf.Variable(0.0, dtype=tf.float64),
+            tf.constant(0.0, dtype=tf.float64),
             self.params["rise_time"].get_value(),
             tf.cast(n_ts, tf.int32),
         )

‎c3/generator/generator.py

+4 -1

@@ -42,6 +42,7 @@ def __init__(
         self.chains = chains
         self.__check_signal_chains()
         self.resolution = resolution
+        self.gen_stacked_signals = None
 
     def __check_signal_chains(self) -> None:
         for channel, chain in self.chains.items():
@@ -111,9 +112,11 @@ def generate_signals(self, instr: Instruction) -> dict:
 
         """
         gen_signal = {}
+        if self.gen_stacked_signals:
+            del self.gen_stacked_signals
         gen_stacked_signals: dict = dict()
         for chan in instr.comps:
-            signal_stack: List[tf.Variable] = []
+            signal_stack: List[tf.constant] = []
             gen_stacked_signals[chan] = []
             for dev_id in self.chains[chan]:
                 dev = self.devices[dev_id]

‎c3/libraries/fidelities.py

+10 -10

@@ -99,7 +99,7 @@ def state_transfer_infid(U_dict: dict, gate: str, index, dims, psi_0, proj: bool
     projection = "fulluni"
     if proj:
         projection = "wzeros"
-    U_ideal = tf.Variable(
+    U_ideal = tf.constant(
         perfect_gate(gate, index, dims, projection), dtype=tf.complex128
     )
     psi_ideal = tf.matmul(U_ideal, psi_0)
@@ -138,7 +138,7 @@ def unitary_infid(U_dict: dict, gate: str, index, dims, proj: bool):
     if proj:
         projection = "wzeros"
         fid_lvls = 2 ** len(index)
-    U_ideal = tf.Variable(
+    U_ideal = tf.constant(
         perfect_gate(gate, index, dims, projection), dtype=tf.complex128
     )
     infid = 1 - tf_unitary_overlap(U, U_ideal, lvls=fid_lvls)
@@ -206,7 +206,7 @@ def lindbladian_unitary_infid(U_dict: dict, gate: str, index, dims, proj: bool):
         projection = "wzeros"
         fid_lvls = 2 ** len(index)
     U_ideal = tf_super(
-        tf.Variable(perfect_gate(gate, index, dims, projection), dtype=tf.complex128)
+        tf.constant(perfect_gate(gate, index, dims, projection), dtype=tf.complex128)
     )
     infid = 1 - tf_superoper_unitary_overlap(U, U_ideal, lvls=fid_lvls)
     return infid
@@ -258,7 +258,7 @@ def average_infid(U_dict: dict, gate: str, index, dims, proj=True):
        Project to computational subspace
     """
     U = U_dict[gate]
-    U_ideal = tf.Variable(
+    U_ideal = tf.constant(
         perfect_gate(gate, index, dims=[2] * len(dims)), dtype=tf.complex128
     )
     infid = 1 - tf_average_fidelity(U, U_ideal, lvls=dims)
@@ -338,7 +338,7 @@ def lindbladian_average_infid(U_dict: dict, gate: str, index, dims, proj=True):
        Project to computational subspace
     """
     U = U_dict[gate]
-    ideal = tf.Variable(
+    ideal = tf.constant(
         perfect_gate(gate, index, dims=[2] * len(dims)), dtype=tf.complex128
     )
     U_ideal = tf_super(ideal)
@@ -432,15 +432,15 @@ def populations(state, lindbladian):
 def population(U_dict: dict, lvl: int, gate: str):
     U = U_dict[gate]
     lvls = U.shape[0]
-    psi_0 = tf.Variable(basis(lvls, 0), dtype=tf.complex128)
+    psi_0 = tf.constant(basis(lvls, 0), dtype=tf.complex128)
     psi_actual = tf.matmul(U, psi_0)
     return populations(psi_actual, lindbladian=False)[lvl]
 
 
 def lindbladian_population(U_dict: dict, lvl: int, gate: str):
     U = U_dict[gate]
     lvls = int(np.sqrt(U.shape[0]))
-    psi_0 = tf.Variable(basis(lvls, 0), dtype=tf.complex128)
+    psi_0 = tf.constant(basis(lvls, 0), dtype=tf.complex128)
     dv_0 = tf_dm_to_vec(tf_state_to_dm(psi_0))
     dv_actual = tf.matmul(U, dv_0)
     return populations(dv_actual, lindbladian=True)[lvl]
@@ -460,7 +460,7 @@ def RB(
     gate = list(U_dict.keys())[0]
     U = U_dict[gate]
     dim = int(U.shape[0])
-    psi_init = tf.Variable(basis(dim, 0), dtype=tf.complex128)
+    psi_init = tf.constant(basis(dim, 0), dtype=tf.complex128)
     if logspace:
         lengths = np.rint(
             np.logspace(np.log10(min_length), np.log10(max_length), num=num_lengths)
@@ -554,7 +554,7 @@ def leakage_RB(
     gate = list(U_dict.keys())[0]
     U = U_dict[gate]
     dim = int(U.shape[0])
-    psi_init = tf.Variable(basis(dim, 0), dtype=tf.complex128)
+    psi_init = tf.constant(basis(dim, 0), dtype=tf.complex128)
     if logspace:
         lengths = np.rint(
             np.logspace(np.log10(min_length), np.log10(max_length), num=num_lengths)
@@ -701,7 +701,7 @@ def orbit_infid(
     infids = []
     for U in Us:
         dim = int(U.shape[0])
-        psi_init = tf.Variable(basis(dim, 0), dtype=tf.complex128)
+        psi_init = tf.constant(basis(dim, 0), dtype=tf.complex128)
         psi_actual = tf.matmul(U, psi_init)
         pop0 = tf_abs(psi_actual[0]) ** 2
         p1 = 1 - pop0

‎c3/optimizers/c1.py

+14 -21

@@ -36,17 +36,17 @@ class C1(Optimizer):
     """
 
     def __init__(
-        self,
-        dir_path,
-        fid_func,
-        fid_subspace,
-        pmap,
-        callback_fids=[],
-        algorithm=None,
-        store_unitaries=False,
-        options={},
-        run_name=None,
-        interactive=True,
+        self,
+        dir_path,
+        fid_func,
+        fid_subspace,
+        pmap,
+        callback_fids=[],
+        algorithm=None,
+        store_unitaries=False,
+        options={},
+        run_name=None,
+        interactive=True,
     ) -> None:
         super().__init__(
             pmap=pmap,
@@ -65,14 +65,6 @@ def __init__(
     def log_setup(self) -> None:
         """
         Create the folders to store data.
-
-        Parameters
-        ----------
-        dir_path : str
-            Filepath
-        run_name : str
-            User specified name for the run
-
         """
         dir_path = os.path.abspath(self.__dir_path)
         run_name = self.__run_name
@@ -90,11 +82,12 @@ def load_model_parameters(self, adjust_exp: str) -> None:
         self.pmap.model.update_model()
         shutil.copy(adjust_exp, os.path.join(self.logdir, "adjust_exp.log"))
 
-    def optimize_controls(self) -> None:
+    def optimize_controls(self, setup_log: bool = True) -> None:
         """
         Apply a search algorithm to your gateset given a fidelity function.
         """
-        self.log_setup()
+        if setup_log:
+            self.log_setup()
         self.start_log()
         self.exp.set_enable_store_unitaries(self.store_unitaries, self.logdir)
         print(f"C3:STATUS:Saving as: {os.path.abspath(self.logdir + self.logname)}")

‎c3/optimizers/optimizer.py

+15 -21

@@ -18,10 +18,6 @@ class Optimizer:
     ----------
     algorithm : callable
         From the algorithm library
-    plot_dynamics : boolean
-        Save plots of time-resolved dynamics in dir_path
-    plot_pulses : boolean
-        Save plots of control signals
     store_unitaries : boolean
         Store propagators as text and pickle
     """
@@ -42,7 +38,7 @@ def __init__(
         self.created_by = None
         self.logname = None
         self.options = None
-        self.dir_path = None
+        self.__dir_path = None
         self.logdir = None
         self.set_algorithm(algorithm)
 
@@ -62,24 +58,22 @@ def replace_logdir(self, new_logdir):
         new_logdir
 
         """
-        try:
-            old_logdir = self.logdir
-        except AttributeError:
-            old_logdir = None
-            pass
+        old_logdir = self.logdir
         self.logdir = new_logdir
+
+        if old_logdir is None:
+            return
+
         try:
-            os.remove(os.path.join(self.dir_path, "recent"))
+            os.remove(os.path.join(self.__dir_path, "recent"))
         except FileNotFoundError:
             pass
-        except AttributeError as e:
-            Warning(e)
-        # os.remove(self.dir_path + self.string)
-        if old_logdir:
-            try:
-                os.rmdir(old_logdir)
-            except OSError:
-                pass
+        # os.remove(self.__dir_path + self.string)
+
+        try:
+            os.rmdir(old_logdir)
+        except OSError:
+            pass
 
     def set_exp(self, exp: Experiment) -> None:
         self.exp = exp
@@ -235,7 +229,7 @@ def fct_to_min(
         """
 
         if isinstance(input_parameters, np.ndarray):
-            current_params = tf.Variable(input_parameters)
+            current_params = tf.constant(input_parameters)
             goal = self.goal_run(current_params)
             self.log_parameters()
             goal = float(goal)
@@ -261,7 +255,7 @@ def fct_to_min_autograd(self, x):
         float
             Value of the goal function.
         """
-        current_params = tf.Variable(x)
+        current_params = tf.constant(x)
         goal, grad = self.goal_run_with_grad(current_params)
         if isinstance(grad, tf.Tensor):
             grad = grad.numpy()

‎c3/optimizers/sensitivity.py

+4 -4

@@ -245,15 +245,15 @@ def goal_run(self, current_params):
                 goal = neg_loglkh_multinom_norm(
                     m_vals,
                     tf.stack(sim_vals),
-                    tf.Variable(m_stds, dtype=tf.float64),
-                    tf.Variable(m_shots, dtype=tf.float64),
+                    tf.constant(m_stds, dtype=tf.float64),
+                    tf.constant(m_shots, dtype=tf.float64),
                 )
             else:
                 goal = g_LL_prime(
                     m_vals,
                     tf.stack(sim_vals),
-                    tf.Variable(m_stds, dtype=tf.float64),
-                    tf.Variable(m_shots, dtype=tf.float64),
+                    tf.constant(m_stds, dtype=tf.float64),
+                    tf.constant(m_shots, dtype=tf.float64),
                 )
             goals.append(goal.numpy())
             seq_weigths.append(num_seqs)

‎c3/parametermap.py

+2 -3

@@ -8,6 +8,7 @@
 from c3.c3objs import Quantity
 from c3.signal.gates import Instruction
 from c3.signal.pulse import components as comp_lib
+from typing import Union
 
 
 class ParameterMap:
@@ -250,7 +251,7 @@ def get_parameters_scaled(self) -> np.ndarray:
         # TODO is there a reason to not return a tensorflow array
         return np.concatenate(values, axis=0).flatten()
 
-    def set_parameters_scaled(self, values: tf.Variable) -> None:
+    def set_parameters_scaled(self, values: Union[tf.constant, tf.Variable]) -> None:
         """
         Set the values in the original instruction class. This fuction should only be
         called by an optimizer. Are you an optimizer?
@@ -259,8 +260,6 @@ def set_parameters_scaled(self, values: tf.Variable) -> None:
         ----------
         values: list
             List of parameter values. Matrix valued parameters need to be flattened.
-        opt_map: list
-            Corresponding identifiers for the parameter values.
 
         """
         model_updated = False

‎c3/system/chip.py

+10 -10

@@ -118,10 +118,10 @@ def init_Hs(self, ann_oper):
 
         """
         resonator = hamiltonians["resonator"]
-        self.Hs["freq"] = tf.Variable(resonator(ann_oper), dtype=tf.complex128)
+        self.Hs["freq"] = tf.constant(resonator(ann_oper), dtype=tf.complex128)
         if self.hilbert_dim > 2:
             duffing = hamiltonians["duffing"]
-            self.Hs["anhar"] = tf.Variable(duffing(ann_oper), dtype=tf.complex128)
+            self.Hs["anhar"] = tf.constant(duffing(ann_oper), dtype=tf.complex128)
 
     def get_Hamiltonian(self):
         """
@@ -216,7 +216,7 @@ def init_Hs(self, ann_oper):
            Annihilation operator in the full Hilbert space.
 
        """
-        self.Hs["freq"] = tf.Variable(
+        self.Hs["freq"] = tf.constant(
             hamiltonians["resonator"](ann_oper), dtype=tf.complex128
         )
 
@@ -317,10 +317,10 @@ def get_freq(self):
 
     def init_Hs(self, ann_oper):
         resonator = hamiltonians["resonator"]
-        self.Hs["freq"] = tf.Variable(resonator(ann_oper), dtype=tf.complex128)
+        self.Hs["freq"] = tf.constant(resonator(ann_oper), dtype=tf.complex128)
         if self.hilbert_dim > 2:
             duffing = hamiltonians["duffing"]
-            self.Hs["anhar"] = tf.Variable(duffing(ann_oper), dtype=tf.complex128)
+            self.Hs["anhar"] = tf.constant(duffing(ann_oper), dtype=tf.complex128)
 
     def init_Ls(self, ann_oper):
         """
@@ -446,12 +446,12 @@ def init_Hs(self, ann_oper):
            Annihilation operator in the full Hilbert space
        """
        resonator = hamiltonians["resonator"]
-       self.Hs["freq"] = tf.Variable(resonator(ann_oper), dtype=tf.complex128)
+       self.Hs["freq"] = tf.constant(resonator(ann_oper), dtype=tf.complex128)
        if self.hilbert_dim > 2:
            duffing = hamiltonians["duffing"]
-           self.Hs["anhar"] = tf.Variable(duffing(ann_oper), dtype=tf.complex128)
+           self.Hs["anhar"] = tf.constant(duffing(ann_oper), dtype=tf.complex128)
            third = hamiltonians["third_order"]
-           self.Hs["beta"] = tf.Variable(third(ann_oper), dtype=tf.complex128)
+           self.Hs["beta"] = tf.constant(third(ann_oper), dtype=tf.complex128)
 
     def get_Hamiltonian(self):
         """
@@ -546,7 +546,7 @@ def __init__(
         self.params["strength"] = strength
 
     def init_Hs(self, opers_list):
-        self.Hs["strength"] = tf.Variable(
+        self.Hs["strength"] = tf.constant(
             self.hamiltonian_func(opers_list), dtype=tf.complex128
         )
 
@@ -570,7 +570,7 @@ class Drive(LineComponent):
     def init_Hs(self, ann_opers: list):
         hs = []
         for a in ann_opers:
-            hs.append(tf.Variable(self.hamiltonian_func(a), dtype=tf.complex128))
+            hs.append(tf.constant(self.hamiltonian_func(a), dtype=tf.complex128))
         self.h = sum(hs)
 
     def get_Hamiltonian(self):

‎c3/system/model.py

+4 -4

@@ -331,14 +331,14 @@ def get_Frame_Rotation(self, t_final: np.float64, freqs: dict, framechanges: dic
         tf.Tensor
             A (diagonal) propagator that adjust phases
         """
-        exponent = tf.Variable(0.0, dtype=tf.complex128)
+        exponent = tf.constant(0.0, dtype=tf.complex128)
         for line in freqs.keys():
             freq = freqs[line]
             framechange = framechanges[line]
             qubit = self.couplings[line].connected[0]
             # TODO extend this to multiple qubits
             ann_oper = self.ann_opers[self.names.index(qubit)]
-            num_oper = tf.Variable(
+            num_oper = tf.constant(
                 np.matmul(ann_oper.T.conj(), ann_oper), dtype=tf.complex128
             )
             # TODO test dressing of FR
@@ -375,12 +375,12 @@ def get_dephasing_channel(self, t_final, amps):
             qubit = self.couplings[line].connected[0]
             # TODO extend this to multiple qubits
             ann_oper = self.ann_opers[self.names.index(qubit)]
-            num_oper = tf.Variable(
+            num_oper = tf.constant(
                 np.matmul(ann_oper.T.conj(), ann_oper), dtype=tf.complex128
             )
             Z = tf_utils.tf_super(
                 tf.linalg.expm(
-                    1.0j * num_oper * tf.Variable(np.pi, dtype=tf.complex128)
+                    1.0j * num_oper * tf.constant(np.pi, dtype=tf.complex128)
                 )
             )
             p = t_final * amp * self.dephasing_strength

‎c3/system/tasks.py

+2 -2

@@ -69,7 +69,7 @@ def initialise(self, drift_H, lindbladian=False, init_temp=None):
             else:
                 raise Warning("C3:WARNING: We still need to do Von Neumann right.")
         else:
-            state = tf.Variable(
+            state = tf.constant(
                 qt_utils.basis(dim, 0), shape=[dim, 1], dtype=tf.complex128
             )
             if lindbladian:
@@ -107,7 +107,7 @@ def confuse(self, pops):
             Populations after misclassification.
 
         """
-        conf_matrix = tf.Variable([[1]], dtype=tf.float64)
+        conf_matrix = tf.constant([[1]], dtype=tf.float64)
         for conf_row in self.params.values():
             row1 = conf_row.get_value()
             row2 = tf.ones_like(row1) - row1

‎c3/utils/tf_utils.py

+6 -6

@@ -417,7 +417,7 @@ def tf_matmul_n(tensor_list):
 def tf_log10(x):
     """Tensorflow had no logarithm with base 10. This is ours."""
     numerator = tf.log(x)
-    denominator = tf.log(tf.Variable(10, dtype=numerator.dtype))
+    denominator = tf.log(tf.constant(10, dtype=numerator.dtype))
     return numerator / denominator
 
 
@@ -443,8 +443,8 @@ def tf_diff(l): # noqa
     returns the same shape by adding a 0 in the last entry.
     """
     dim = l.shape[0] - 1
-    diagonal = tf.Variable([-1] * dim + [0], dtype=l.dtype)
-    offdiagonal = tf.Variable([1] * dim, dtype=l.dtype)
+    diagonal = tf.constant([-1] * dim + [0], dtype=l.dtype)
+    offdiagonal = tf.constant([1] * dim, dtype=l.dtype)
     proj = tf.linalg.diag(diagonal) + tf.linalg.diag(offdiagonal, k=1)
     return tf.linalg.matvec(proj, l)
 
@@ -501,7 +501,7 @@ def tf_expm_dynamic(A, acc=1e-4):
     A_powers = A
     r += A
 
-    ii = tf.Variable(2, dtype=tf.complex128)
+    ii = tf.constant(2, dtype=tf.complex128)
     while tf.reduce_max(tf.abs(A_powers)) > acc:
         A_powers = tf.matmul(A_powers, A) / ii
         ii += 1
@@ -564,7 +564,7 @@ def tf_choi_to_chi(U, dims=None):
     """
     if dims is None:
         dims = [tf.sqrt(tf.cast(U.shape[0], U.dtype))]
-    B = tf.Variable(qt_utils.pauli_basis([2] * len(dims)), dtype=tf.complex128)
+    B = tf.constant(qt_utils.pauli_basis([2] * len(dims)), dtype=tf.complex128)
     return tf.linalg.adjoint(B) @ U @ B
 
 
@@ -708,5 +708,5 @@ def tf_project_to_comp(A, dims, to_super=False):
     proj = proj_list.pop()
     while not proj_list == []:
         proj = np.kron(proj_list.pop(), proj)
-    P = tf.Variable(proj, dtype=A.dtype)
+    P = tf.constant(proj, dtype=A.dtype)
     return tf.matmul(tf.matmul(P, A, transpose_a=True), P)

‎c3/utils/utils.py

+2 -0

@@ -123,6 +123,8 @@ def jsonify_list(data):
         return {k: jsonify_list(v) for k, v in data.items()}
     elif isinstance(data, list):
         return [jsonify_list(v) for v in data]
+    elif isinstance(data, tuple):
+        return tuple(jsonify_list(v) for v in data)
     elif isinstance(data, np.ndarray):
         return data.tolist()
     elif isinstance(data, ops.EagerTensor):
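jsonify_list now recurses into tuples as well, returning a tuple of converted elements. A small hedged sketch of the new branch (sample data is illustrative):

import numpy as np
from c3.utils.utils import jsonify_list

# A dict holding a tuple of numpy arrays: arrays become lists, the tuple is kept.
data = {"samples": (np.array([1.0, 2.0]), np.array([3.0]))}
print(jsonify_list(data))
# {'samples': ([1.0, 2.0], [3.0])}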

‎test/test_quantity.py

+5 -0

@@ -123,3 +123,8 @@ def test_qty_np_conversions() -> None:
 
     b = Quantity(np.array([0.0000001, 0.00001]))
     np.array([b])
+
+    c = Quantity([0, 0.1])
+    assert len(c) == 2
+    assert c.shape == [2]
+

‎test/test_tf_utils.py

+2 -2

@@ -24,8 +24,8 @@ def test_unitary_overlap(args: Tuple[List[int], List[int], List[int]]) -> None:
         Matrix A, Matrix B and Expected Overlap
     """
     x, x_noisy, over = args
-    pauli_x = tf.Variable(x)
-    pauli_x_noisy = tf.Variable(x_noisy)
+    pauli_x = tf.constant(x)
+    pauli_x_noisy = tf.constant(x_noisy)
 
     overlap = tf_unitary_overlap(pauli_x, pauli_x_noisy)
     assert overlap.numpy() > over
