diff --git a/.all-contributorsrc b/.all-contributorsrc
index 610c8223..a625ed92 100644
--- a/.all-contributorsrc
+++ b/.all-contributorsrc
@@ -240,6 +240,96 @@
"contributions": [
"example"
]
+ },
+ {
+ "login": "FelixXu35",
+ "name": "Felix Xu",
+ "avatar_url": "https://avatars.githubusercontent.com/u/61252303?v=4",
+ "profile": "https://www.linkedin.com/in/felix-xu-16a153196/",
+ "contributions": [
+ "tutorial",
+ "code",
+ "test"
+ ]
+ },
+ {
+ "login": "hongyehu",
+ "name": "Hong-Ye Hu",
+ "avatar_url": "https://avatars.githubusercontent.com/u/50563225?v=4",
+ "profile": "https://scholar.harvard.edu/hongyehu/home",
+ "contributions": [
+ "doc"
+ ]
+ },
+ {
+ "login": "PeilinZHENG",
+ "name": "peilin",
+ "avatar_url": "https://avatars.githubusercontent.com/u/45784888?v=4",
+ "profile": "https://github.com/PeilinZHENG",
+ "contributions": [
+ "tutorial",
+ "code",
+ "test",
+ "doc"
+ ]
+ },
+ {
+ "login": "EmilianoG-byte",
+ "name": "Cristian Emiliano Godinez Ramirez",
+ "avatar_url": "https://avatars.githubusercontent.com/u/57567043?v=4",
+ "profile": "https://emilianog-byte.github.io",
+ "contributions": [
+ "code",
+ "test"
+ ]
+ },
+ {
+ "login": "ztzhu1",
+ "name": "ztzhu",
+ "avatar_url": "https://avatars.githubusercontent.com/u/111620128?v=4",
+ "profile": "https://github.com/ztzhu1",
+ "contributions": [
+ "code"
+ ]
+ },
+ {
+ "login": "royess",
+ "name": "Rabqubit",
+ "avatar_url": "https://avatars.githubusercontent.com/u/31059422?v=4",
+ "profile": "https://github.com/royess",
+ "contributions": [
+ "example"
+ ]
+ },
+ {
+ "login": "king-p3nguin",
+ "name": "Kazuki Tsuoka",
+ "avatar_url": "https://avatars.githubusercontent.com/u/103920010?v=4",
+ "profile": "https://github.com/king-p3nguin",
+ "contributions": [
+ "code",
+ "test",
+ "doc",
+ "example"
+ ]
+ },
+ {
+ "login": "Gopal-Dahale",
+ "name": "Gopal Ramesh Dahale",
+ "avatar_url": "https://avatars.githubusercontent.com/u/49199003?v=4",
+ "profile": "https://gopal-dahale.github.io/",
+ "contributions": [
+ "example"
+ ]
+ },
+ {
+ "login": "AbdullahKazi500",
+ "name": "Chanandellar Bong",
+ "avatar_url": "https://avatars.githubusercontent.com/u/75779966?v=4",
+ "profile": "https://github.com/AbdullahKazi500",
+ "contributions": [
+ "example"
+ ]
}
],
"contributorsPerLine": 6,
@@ -247,5 +337,6 @@
"repoType": "github",
"repoHost": "https://github.com",
"projectName": "tensorcircuit",
- "projectOwner": "tencent-quantum-lab"
+ "projectOwner": "tencent-quantum-lab",
+ "commitType": "docs"
}
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000..176a458f
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+* text=auto
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 7a0a7dff..964f9d67 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,7 +7,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-20.04, macos-latest] # macos-latest disabled to save quota
- python-version: [3.8]
+ python-version: ["3.10"]
fail-fast: false
steps:
- uses: actions/checkout@v2
diff --git a/.github/workflows/nightly_release.yml b/.github/workflows/nightly_release.yml
index ce62cc9c..8a1dbda8 100644
--- a/.github/workflows/nightly_release.yml
+++ b/.github/workflows/nightly_release.yml
@@ -18,7 +18,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v4
with:
- python-version: 3.8
+ python-version: "3.10"
cache: "pip"
- name: install dependencies
run: |
diff --git a/.gitignore b/.gitignore
index c1649c55..c21b5efb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,3 +28,4 @@ examples/Unified AD model.ipynb
docs/source/locale/zh/LC_MESSAGES/textbook.po
docs/source/locale/zh/LC_MESSAGES/whitepapertoc_cn.po
docs/source/locale/zh/LC_MESSAGES/textbooktoc.po
+test.qasm
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
new file mode 100644
index 00000000..2acb1049
--- /dev/null
+++ b/.readthedocs.yaml
@@ -0,0 +1,24 @@
+# .readthedocs.yaml
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+formats:
+ - pdf
+
+# Set the version of Python and other tools you might need
+build:
+ os: ubuntu-20.04
+ tools:
+ python: "3.8"
+
+# Build documentation in the docs/ directory with Sphinx
+sphinx:
+ configuration: docs/source/conf.py
+# We recommend specifying your dependencies to enable reproducible builds:
+# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
+python:
+ install:
+ - requirements: requirements/requirements-rtd.txt
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d922b381..3f072c9a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,110 @@
## Unreleased
+### Added
+
+- Add support for parameter expression in qiskit translation
+
+## 0.12.0
+
+### Added
+
+- Add translation of r gate from qiskit
+
+- Add `det` method at backends
+
+- Add fermion Gaussian state simulator in `fgs.py`
+
+- Add `partial_transpose` and `entanglement_negativity` method in `quantum.py`
+
+- Add `reduced_wavefunction` method in `quantum.py` to get reduced pure state
+
+### Changed
+
+- move ensemble module to applications/ai (breaking changes)
+
+- tc2qiskit now records qiskit measure with incremental clbit from 0
+
+### Fixed
+
+- Support degenerate eigenvalue for jax backend `eigh` method when using AD
+
+- Fixed `cu` gate translation from qiskit to avoid qiskit bug
+
+- Fixed jax refactoring (0.4.24) where SVD and QR return a namedtuple instead of a tuple
+
+- Fix qiskit<1.0 and tf<2.16
+
+## 0.11.0
+
+### Added
+
+- Add multiple GPU VQE examples using jax pmap
+
+- Add `with_prob` option to `general_kraus` so that the probability of each option can be returned together
+
+- Add benchmark example showcasing new way of implementing matrix product using vmap
+
+- Add keras3 example showcasing integration with tc
+
+- Add circuit copy method that avoid shallow copy issue `Circuit.copy()`
+
+- Add end to end infrastructures and methods for classical shadow in `shadows.py`
+
+- Add classical shadow tutorial
+
+- Add NN-VQE tutorial
+
+### Fixed
+
+- improve the `adaptive_vmap` to support internal jit and pytree output
+
+- fix `pauli_gates` dtype unchange issue when set new dtype (not recommend to use this attr anymore)
+
+- fix rem `apply_correction` bug when non-numpy backend is set
+
+- fix tf warning for `cast` with higher version of tf
+
+### Changed
+
+- The static method `BaseCircuit.copy` is renamed as `BaseCircuit.copy_nodes` (breaking changes)
+
+## 0.10.0
+
+### Added
+
+- `c.measure_instruction(*qubits)` now supports multiple ints specified at the same time
+
+- `c.expectation_ps()` now also supports `ps` argument directly (pauli structures)
+
+- Add tc version print in `tc.about()` method
+
+- tc now supports fancy batch indexing for gates, e.g. `c.rxx([0, 1, 2], [1, 2, 3], theta=K.ones([3]))`
+
+- Task management via group tag (when `submit_task` and `list_tasks`)
+
+- `batch_expectation_ps` now supports local device without topology and thus unify the interface for numerical exact simulation, numerical simulation with measurement shots and QPU experiments
+
+- introduce two stage compiling for `batch_expectation_ps` to save some compiling overhead
+
+- Add experimental support for ODE backend pulse level control simulation/analog quantum computing
+
+- make the pulse level control support differentiating the end time
+
+- Add new qem module with qem methods: zne, dd and rc
+
+### Fixed
+
+- `tc.results.counts.plot_histogram` now can dispatch kws to corresponding qiskit method
+
+- New implementation for `c.inverse()` to partially avoid unrecognized gate name issue
+
+- Fixed bug for `batch_expectation_ps` for jax backend
+
+- Partially fix the SVD numerical stability bug on tf backend when using `MPSCircuit`
+
+- List syntax for gate now supports range
+
## 0.9.1
### Added
diff --git a/README.md b/README.md
index 0795945e..c46b54c7 100644
--- a/README.md
+++ b/README.md
@@ -31,13 +31,13 @@
TensorCircuit is the next generation of quantum software framework with support for automatic differentiation, just-in-time compiling, hardware acceleration, and vectorized parallelism.
-TensorCircuit is built on top of modern machine learning frameworks and is machine learning backend agnostic. It is specifically suitable for highly efficient simulations of quantum-classical hybrid paradigm and variational quantum algorithms. It also supports real quantum hardware access and provides CPU/GPU/QPU hybrid deployment solutions since v0.9.
+TensorCircuit is built on top of modern machine learning frameworks: Jax, TensorFlow, and PyTorch. It is specifically suitable for highly efficient simulations of quantum-classical hybrid paradigm and variational quantum algorithms in ideal, noisy and approximate cases. It also supports real quantum hardware access and provides CPU/GPU/QPU hybrid deployment solutions since v0.9.
## Getting Started
-Please begin with [Quick Start](/docs/source/quickstart.rst).
+Please begin with [Quick Start](/docs/source/quickstart.rst) in the [full documentation](https://tensorcircuit.readthedocs.io/).
-For more information and introductions, please refer to helpful [example scripts](/examples) and [full documentation](https://tensorcircuit.readthedocs.io/). API docstrings and test cases in [tests](/tests) are also informative.
+For more information on software usage, SOTA algorithm implementation and engineering paradigm demonstration, please refer to 70+ [example scripts](/examples) and 30+ [tutorial notebooks](https://tensorcircuit.readthedocs.io/en/latest/#tutorials). API docstrings and test cases in [tests](/tests) are also informative.
The following are some minimal demos.
@@ -76,6 +76,57 @@ theta = tc.array_to_tensor(1.0)
print(g(theta))
```
+
+ More highlight features for TensorCircuit (click for details)
+
+- Sparse Hamiltonian generation and expectation evaluation:
+
+```python
+n = 6
+pauli_structures = []
+weights = []
+for i in range(n):
+ pauli_structures.append(tc.quantum.xyz2ps({"z": [i, (i + 1) % n]}, n=n))
+ weights.append(1.0)
+for i in range(n):
+ pauli_structures.append(tc.quantum.xyz2ps({"x": [i]}, n=n))
+ weights.append(-1.0)
+h = tc.quantum.PauliStringSum2COO(pauli_structures, weights)
+print(h)
+# BCOO(complex64[64, 64], nse=448)
+c = tc.Circuit(n)
+c.h(range(n))
+energy = tc.templates.measurements.operator_expectation(c, h)
+# -6
+```
+
+- Large-scale simulation with tensor network engine
+
+```python
+# tc.set_contractor("cotengra-30-10")
+n=500
+c = tc.Circuit(n)
+c.h(0)
+c.cx(range(n-1), range(1, n))
+c.expectation_ps(z=[0, n-1], reuse=False)
+```
+
+- Density matrix simulator and quantum info quantities
+
+```python
+c = tc.DMCircuit(2)
+c.h(0)
+c.cx(0, 1)
+c.depolarizing(1, px=0.1, py=0.1, pz=0.1)
+dm = c.state()
+print(tc.quantum.entropy(dm))
+print(tc.quantum.entanglement_entropy(dm, [0]))
+print(tc.quantum.entanglement_negativity(dm, [0]))
+print(tc.quantum.log_negativity(dm, [0]))
+```
+
+
+
## Install
The package is written in pure Python and can be obtained via pip as:
@@ -92,13 +143,6 @@ pip install tensorcircuit[tensorflow]
Other optional dependencies include `[torch]`, `[jax]`, `[qiskit]` and `[cloud]`.
-For the nightly build of tensorcircuit with new features, try:
-
-```python
-pip uninstall tensorcircuit
-pip install tensorcircuit-nightly
-```
-
We also have [Docker support](/docker).
## Advantages
@@ -117,24 +161,83 @@ We also have [Docker support](/docker).
- Elegance
- - Flexibility: customized contraction, multiple ML backend/interface choices, multiple dtype precisions
+ - Flexibility: customized contraction, multiple ML backend/interface choices, multiple dtype precisions, multiple QPU providers
- API design: quantum for humans, less code, more power
+- Batteries included
+
+
+ Tons of amazing features and built in tools for research (click for details)
+
+ - Support **super large circuit simulation** using tensor network engine.
+
+ - Support **noisy simulation** with both Monte Carlo and density matrix (tensor network powered) modes.
+
+ - Support **approximate simulation** with MPS-TEBD modes.
+
+ - Support **analog/digital hybrid simulation** (time dependent Hamiltonian evolution, **pulse** level simulation) with neural ode modes.
+
+ - Support **Fermion Gaussian state** simulation with expectation, entanglement, measurement, ground state, real and imaginary time evolution.
+
+ - Support **qudits simulation**.
+
+ - Support **parallel** quantum circuit evaluation across **multiple GPUs**.
+
+ - Highly customizable **noise model** with gate error and scalable readout error.
+
+ - Support for **non-unitary** gate and post-selection simulation.
+
+ - Support **real quantum devices access** from different providers.
+
+ - **Scalable readout error mitigation** native to both bitstring and expectation level with automatic qubit mapping consideration.
+
+ - **Advanced quantum error mitigation methods** and pipelines such as ZNE, DD, RC, etc.
+
+ - Support **MPS/MPO** as representations for input states, quantum gates and observables to be measured.
+
+ - Support **vectorized parallelism** on circuit inputs, circuit parameters, circuit structures, circuit measurements and these vectorization can be nested.
+
+  - Gradients can be obtained with both **automatic differentiation** and parameter shift (vmap accelerated) modes.
+
+ - **Machine learning interface/layer/model** abstraction in both TensorFlow and PyTorch for both numerical simulation and real QPU experiments.
+
+ - Circuit sampling supports both final state sampling and perfect sampling from tensor networks.
+
+ - Light cone reduction support for local expectation calculation.
+
+  - Highly customizable tensor network contraction path finder with opt-einsum interface.
+
+ - Observables are supported in measurement, sparse matrix, dense matrix and MPO format.
+
+ - Super fast weighted sum Pauli string Hamiltonian matrix generation.
+
+ - Reusable common circuit/measurement/problem templates and patterns.
+
+ - Jittable classical shadow infrastructures.
+
+ - SOTA quantum algorithm and model implementations.
+
+ - Support hybrid workflows and pipelines with CPU/GPU/QPU hardware from local/cloud/hpc resources using tf/torch/jax/cupy/numpy frameworks all at the same time.
+
+
+
## Contributing
### Status
-This project is released by [Tencent Quantum Lab](https://quantum.tencent.com/) and is created and maintained by [Shi-Xin Zhang](https://github.com/refraction-ray) with current core authors [Shi-Xin Zhang](https://github.com/refraction-ray) and [Yu-Qin Chen](https://github.com/yutuer21). We also thank [contributions](https://github.com/tencent-quantum-lab/tensorcircuit/graphs/contributors) from the lab and the open source community.
+This project is created and maintained by [Shi-Xin Zhang](https://github.com/refraction-ray) with current core authors [Shi-Xin Zhang](https://github.com/refraction-ray) and [Yu-Qin Chen](https://github.com/yutuer21). We also thank [contributions](https://github.com/tencent-quantum-lab/tensorcircuit/graphs/contributors) from the open source community.
### Citation
-If this project helps in your research, please cite our software whitepaper published in Quantum:
+If this project helps in your research, please cite our software whitepaper to acknowledge the work put into the development of TensorCircuit.
-[TensorCircuit: a Quantum Software Framework for the NISQ Era](https://quantum-journal.org/papers/q-2023-02-02-912/)
+[TensorCircuit: a Quantum Software Framework for the NISQ Era](https://quantum-journal.org/papers/q-2023-02-02-912/) (published in Quantum)
which is also a good introduction to the software.
+Research works citing TensorCircuit can be highlighted in [Research and Applications section](https://github.com/tencent-quantum-lab/tensorcircuit#research-and-applications).
+
### Guidelines
For contribution guidelines and notes, see [CONTRIBUTING](/CONTRIBUTING.md).
@@ -179,6 +282,17 @@ TensorCircuit is open source, released under the Apache License, Version 2.0.
 隐公观鱼 💻 ⚠️ |
 WiuYuan 💡 |
+  Felix Xu ✅ 💻 ⚠️ |
+  Hong-Ye Hu 📖 |
+  peilin ✅ 💻 ⚠️ 📖 |
+  Cristian Emiliano Godinez Ramirez 💻 ⚠️ |
+
+
+  ztzhu 💻 |
+  Rabqubit 💡 |
+  Kazuki Tsuoka 💻 ⚠️ 📖 💡 |
+  Gopal Ramesh Dahale 💡 |
+  Chanandellar Bong 💡 |
@@ -207,7 +321,7 @@ Reference paper: https://arxiv.org/abs/2010.08561 (published in QST).
For the application of Variational Quantum-Neural Hybrid Eigensolver, see [applications](/tensorcircuit/applications).
-Reference paper: https://arxiv.org/abs/2106.05105 (published in PRL) and https://arxiv.org/abs/2112.10380.
+Reference paper: https://arxiv.org/abs/2106.05105 (published in PRL) and https://arxiv.org/abs/2112.10380 (published in AQT).
### VQEX-MBL
@@ -221,14 +335,71 @@ For the numerical demosntration of discrete time crystal enabled by Stark many-b
Reference paper: https://arxiv.org/abs/2208.02866 (published in PRL).
-### EMQAOA-DARBO
+### RA-Training
-For the numerical simulation and hardware experiments with error mitigation on QAOA, see the [project repo](https://github.com/sherrylixuecheng/EMQAOA-DARBO).
+For the numerical simulation of variational quantum algorithm training using the random gate activation strategy proposed by us, see the [project repo](https://github.com/ls-iastu/RAtraining).
-Reference paper: https://arxiv.org/abs/2303.14877.
+Reference paper: https://arxiv.org/abs/2303.08154 (published in PRR as a Letter).
### TenCirChem
[TenCirChem](https://github.com/tencent-quantum-lab/TenCirChem) is an efficient and versatile quantum computation package for molecular properties. TenCirChem is based on TensorCircuit and is optimized for chemistry applications.
-Reference paper: https://arxiv.org/abs/2303.10825.
+Reference paper: https://arxiv.org/abs/2303.10825 (published in JCTC).
+
+### EMQAOA-DARBO
+
+For the numerical simulation and hardware experiments with error mitigation on QAOA, see the [project repo](https://github.com/sherrylixuecheng/EMQAOA-DARBO).
+
+Reference paper: https://arxiv.org/abs/2303.14877 (published in Communications Physics).
+
+### NN-VQA
+
+For the setup and simulation code of neural network encoded variational quantum eigensolver, see the [demo](/docs/source/tutorials/nnvqe.ipynb).
+
+Reference paper: https://arxiv.org/abs/2308.01068 (published in PRApplied).
+
+### More works
+
+
+ More research works and code projects using TensorCircuit (click for details)
+
+- Neural Predictor based Quantum Architecture Search: https://arxiv.org/abs/2103.06524 (published in Machine Learning: Science and Technology).
+
+- Quantum imaginary-time control for accelerating the ground-state preparation: https://arxiv.org/abs/2112.11782 (published in PRR).
+
+- Efficient Quantum Simulation of Electron-Phonon Systems by Variational Basis State Encoder: https://arxiv.org/abs/2301.01442 (published in PRR).
+
+- Variational Quantum Simulations of Finite-Temperature Dynamical Properties via Thermofield Dynamics: https://arxiv.org/abs/2206.05571.
+
+- Understanding quantum machine learning also requires rethinking generalization: https://arxiv.org/abs/2306.13461 (published in Nature Communications).
+
+- Decentralized Quantum Federated Learning for Metaverse: Analysis, Design and Implementation: https://arxiv.org/abs/2306.11297. Code: https://github.com/s222416822/BQFL.
+
+- Non-IID quantum federated learning with one-shot communication complexity: https://arxiv.org/abs/2209.00768 (published in Quantum Machine Intelligence). Code: https://github.com/JasonZHM/quantum-fed-infer.
+
+- Quantum generative adversarial imitation learning: https://doi.org/10.1088/1367-2630/acc605 (published in New Journal of Physics).
+
+- GSQAS: Graph Self-supervised Quantum Architecture Search: https://arxiv.org/abs/2303.12381 (published in Physica A: Statistical Mechanics and its Applications).
+
+- Practical advantage of quantum machine learning in ghost imaging: https://www.nature.com/articles/s42005-023-01290-1 (published in Communications Physics).
+
+- Zero and Finite Temperature Quantum Simulations Powered by Quantum Magic: https://arxiv.org/abs/2308.11616.
+
+- Comparison of Quantum Simulators for Variational Quantum Search: A Benchmark Study: https://arxiv.org/abs/2309.05924.
+
+- Statistical analysis of quantum state learning process in quantum neural networks: https://arxiv.org/abs/2309.14980 (published in NeurIPS).
+
+- Generative quantum machine learning via denoising diffusion probabilistic models: https://arxiv.org/abs/2310.05866 (published in PRL).
+
+- Quantum imaginary time evolution and quantum annealing meet topological sector optimization: https://arxiv.org/abs/2310.04291.
+
+- Google Summer of Code 2023 Projects (QML4HEP): https://github.com/ML4SCI/QMLHEP, https://github.com/Gopal-Dahale/qgnn-hep, https://github.com/salcc/QuantumTransformers.
+
+- Absence of barren plateaus in finite local-depth circuits with long-range entanglement: https://arxiv.org/abs/2311.01393 (published in PRL).
+
+- Non-Markovianity benefits quantum dynamics simulation: https://arxiv.org/abs/2311.17622.
+
+
+
+If you want to highlight your research work or projects here, feel free to add by opening PR.
diff --git a/README_cn.md b/README_cn.md
index bc49327d..8137eb85 100644
--- a/README_cn.md
+++ b/README_cn.md
@@ -25,17 +25,17 @@
English | 简体中文
-TensorCircuit 是下一代量子软件框架,支持自动微分、即时编译、硬件加速和向量并行化。
+TensorCircuit 是下一代量子软件框架,完美支持自动微分、即时编译、硬件加速和向量并行化。
-TensorCircuit 建立在现代机器学习框架之上,并且与机器学习后端无关。 它特别适用于量子经典混合范式和变分量子算法的高效模拟。
+TensorCircuit 建立在现代机器学习框架 Jax, TensorFlow, PyTorch 之上,支持机器学习后端无关的统一界面。 其特别适用于理想情况、含噪声情况及可控近似情况下,大规模量子经典混合范式和变分量子算法的高效模拟。
-TensorCircuit 现在支持真实量子硬件连接和实验,并提供优雅的 CPU/GPU/QPU 混合部署方案(v0.9+)。
+TensorCircuit 现在支持真实量子硬件连接和实验,并提供优雅的 CPU/GPU/QPU 混合部署训练方案(v0.9+)。
## 入门
-请从 [快速上手](/docs/source/quickstart.rst) 和 [Jupyter 教程](/docs/source/tutorials) 开始。
+请从 [完整文档](https://tensorcircuit.readthedocs.io/zh/latest/) 中的 [快速上手](/docs/source/quickstart.rst) 开始。
-有关更多信息和介绍,请参阅有用的 [示例脚本](/examples) 和 [完整文档](https://tensorcircuit.readthedocs.io/zh/latest/)。 [测试](/tests)用例和 API docstring 也提供了丰富的使用信息。
+有关软件用法,算法实现和工程范式演示的更多信息和介绍,请参阅 70+ [示例脚本](/examples) 和 30+ [案例教程](https://tensorcircuit.readthedocs.io/zh/latest/#tutorials)。 [测试](/tests) 用例和 API docstring 也提供了丰富的使用信息。
以下是一些最简易的演示。
@@ -52,7 +52,7 @@ print(c.expectation_ps(z=[0, 1]))
print(c.sample(allow_state=True, batch=1024, format="count_dict_bin"))
```
-- 运行时特性定制:
+- 运行时特性设置:
```python
tc.set_backend("tensorflow")
@@ -115,7 +115,7 @@ pip install tensorcircuit-nightly
- 优雅
- - 灵活性:自定义张量收缩、多种 ML 后端/接口选择、多种数值精度
+ - 灵活性:自定义张量收缩、多种 ML 后端/接口选择、多种数值精度、多种量子硬件
- API 设计:人类可理解的量子,更少的代码,更多的可能
@@ -123,11 +123,11 @@ pip install tensorcircuit-nightly
### 现况
-该项目由[腾讯量子实验室](https://quantum.tencent.com/)发布,由 [Shi-Xin Zhang](https://github.com/refraction-ray) 创造并维护。当前核心作者包括 [Shi-Xin Zhang](https://github.com/refraction-ray) 和 [Yu-Qin Chen](https://github.com/yutuer21)。我们也感谢来自实验室和开源社区的[贡献](https://github.com/tencent-quantum-lab/tensorcircuit/graphs/contributors)。
+该项目由 [Shi-Xin Zhang](https://github.com/refraction-ray) 创造并维护。当前核心作者包括 [Shi-Xin Zhang](https://github.com/refraction-ray) 和 [Yu-Qin Chen](https://github.com/yutuer21)。我们也感谢来自开源社区的[贡献](https://github.com/tencent-quantum-lab/tensorcircuit/graphs/contributors)。
### 引用
-如果该软件对您的研究有帮助, 请引用我们发表在 Quantum 期刊的白皮书文章:
+如果该软件对您的研究有帮助, 请引用我们发表在 Quantum 期刊的白皮书文章来支持我们的研发付出。
[TensorCircuit: a Quantum Software Framework for the NISQ Era](https://quantum-journal.org/papers/q-2023-02-02-912/).
@@ -167,14 +167,26 @@ VQEX 在 MBL 相位识别上的应用见 [教程](/docs/source/tutorials/vqex_mb
参考论文: https://arxiv.org/abs/2208.02866 (PRL)。
+### RA-Training
+
+利用我们提出的随机量子门激活策略训练优化变分量子算法的实现请参考 [项目](https://github.com/ls-iastu/RAtraining).
+
+参考论文: https://arxiv.org/abs/2303.08154。
+
+### TenCirChem
+
+[TenCirChem](https://github.com/tencent-quantum-lab/TenCirChem) 是高效的,专注于处理和计算分子性质的量子计算软件。其基于 TensorCircuit 并为量子化学任务进行了专门的优化。
+
+参考论文: https://arxiv.org/abs/2303.10825 (JCTC)。
+
### EMQAOA-DARBO
数值模拟和带错误消除的真实量子硬件实验验证 QAOA 优化的代码请参考 [项目](https://github.com/sherrylixuecheng/EMQAOA-DARBO)。
参考论文: https://arxiv.org/abs/2303.14877。
-### TenCirChem
+### NN-VQA
-[TenCirChem](https://github.com/tencent-quantum-lab/TenCirChem) 是高效的,专注于处理和计算分子性质的量子计算软件。其基于 TensorCircuit 并为量子化学任务进行了专门的优化。
+关于神经网络编码的变分量子算法的实现和工作流, 见 [教程](/docs/source/tutorials/nnvqe.ipynb)。
-参考论文: https://arxiv.org/abs/2303.10825。
+参考论文: https://arxiv.org/abs/2308.01068。
diff --git a/check_all.sh b/check_all.sh
old mode 100755
new mode 100644
diff --git a/docker/Dockerfile_v2 b/docker/Dockerfile_v2
new file mode 100644
index 00000000..9f2f4523
--- /dev/null
+++ b/docker/Dockerfile_v2
@@ -0,0 +1,37 @@
+FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu20.04
+# nvidia/cuda:11.6.0-cudnn8-devel-ubuntu20.04
+
+RUN apt update && DEBIAN_FRONTEND=noninteractive apt install -y \
+ wget \
+ git \
+ vim \
+ pandoc
+
+RUN wget -q -P /tmp \
+ https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
+ && bash /tmp/Miniconda3-latest-Linux-x86_64.sh -b -p /opt/conda \
+ && rm /tmp/Miniconda3-latest-Linux-x86_64.sh
+
+ENV PATH="/opt/conda/bin:$PATH"
+
+RUN conda install -y \
+ pip \
+ python=3.10
+
+COPY requirements/requirements-docker-v2.txt /requirements-docker-v2.txt
+
+# RUN pip install -r /requirements-docker-v2.txt -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
+RUN pip install -i https://pypi.tuna.tsinghua.edu.cn/simple -r /requirements-docker-v2.txt -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
+
+# RUN pip install nvidia-cudnn-cu11==8.6.0.163 ray
+RUN pip install -i https://pypi.tuna.tsinghua.edu.cn/simple nvidia-cudnn-cu11==8.6.0.163 ray
+
+RUN pip install tensorcircuit
+
+# requirements conflict for ray
+# jax must have cudnn>8.6 otherwise fail when init array on gpu,
+# while torch insists cudnn 8.5 in setup but 8.6 can also work for torch
+
+RUN echo export TF_CPP_MIN_LOG_LEVEL=3 >> ~/.bashrc
+
+CMD ["/bin/bash"]
\ No newline at end of file
diff --git a/docker/README.md b/docker/README.md
index c9940635..1c5aea0d 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -4,6 +4,12 @@ Run the following command to build the docker for tensorcircuit at parent path:
sudo docker build . -f docker/Dockerfile -t tensorcircuit
```
+Since v0.10 we introduce new docker env based on ubuntu20.04+cuda11.7+py3.10 (+ pip installed tensorcircuit package), build the new docker use
+
+```bash
+sudo docker build . -f docker/Dockerfile_v2 -t tensorcircuit
+```
+
One can also pull the [official image](https://hub.docker.com/repository/docker/tensorcircuit/tensorcircuit) from DockerHub as
```bash
@@ -15,14 +21,9 @@ Run the docker container by the following command:
```bash
sudo docker run -it --network host --gpus all tensorcircuit
-# if one also wants to mount local source code, also add args `-v "$(pwd)":/app`
-
-# using tensorcircuit/tensorcircuit to run the prebuild docker image from dockerhub
+# if one also wants to mount local source code, also add args `-v "$(pwd)":/root`
-# for old dockerfile with no runtime env setting
-# sudo docker run -it --network host -e LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.0/targets/x86_64-linux/lib -e PYTHONPATH=/app -v "$(pwd)":/app --gpus all tensorcircuit
+# using tensorcircuit/tensorcircuit:latest to run the prebuild docker image from dockerhub
```
-`export TF_CPP_MIN_LOG_LEVEL=3` maybe necessary since jax suprisingly frequently complain about ptxas version problem. And `export CUDA_VISIBLE_DEVICES=-1` if you want to test only on CPU.
-
-The built docker has no tensorcircuit pip package installed but left with a tensorcircuit source code dir. So one can `python setup.py develop` to install tensorcircuit locally (one can also mount the tensorcircuit codebase on host) or `pip install tensorcircuit` within the running docker.
+`export CUDA_VISIBLE_DEVICES=-1` if you want to test only on CPU.
diff --git a/docs/source/api/about.rst b/docs/source/api/about.rst
index 8f7bbf76..e065f1eb 100644
--- a/docs/source/api/about.rst
+++ b/docs/source/api/about.rst
@@ -1,5 +1,5 @@
tensorcircuit.about
-==================================================
+================================================================================
.. automodule:: tensorcircuit.about
:members:
:undoc-members:
diff --git a/docs/source/api/abstractcircuit.rst b/docs/source/api/abstractcircuit.rst
index 2caf0af1..3d67a499 100644
--- a/docs/source/api/abstractcircuit.rst
+++ b/docs/source/api/abstractcircuit.rst
@@ -1,5 +1,5 @@
tensorcircuit.abstractcircuit
-==================================================
+================================================================================
.. automodule:: tensorcircuit.abstractcircuit
:members:
:undoc-members:
diff --git a/docs/source/api/applications.rst b/docs/source/api/applications.rst
index ad329ccf..85c31126 100644
--- a/docs/source/api/applications.rst
+++ b/docs/source/api/applications.rst
@@ -1,9 +1,13 @@
tensorcircuit.applications
-==================================================
+================================================================================
.. toctree::
+ applications/ai.rst
applications/dqas.rst
+ applications/finance.rst
applications/graphdata.rst
applications/layers.rst
+ applications/optimization.rst
+ applications/physics.rst
applications/utils.rst
applications/vags.rst
applications/van.rst
diff --git a/docs/source/api/applications/ai.rst b/docs/source/api/applications/ai.rst
new file mode 100644
index 00000000..96a22cdb
--- /dev/null
+++ b/docs/source/api/applications/ai.rst
@@ -0,0 +1,4 @@
+tensorcircuit.applications.ai
+================================================================================
+.. toctree::
+ ai/ensemble.rst
\ No newline at end of file
diff --git a/docs/source/api/applications/ai/ensemble.rst b/docs/source/api/applications/ai/ensemble.rst
new file mode 100644
index 00000000..0173ac00
--- /dev/null
+++ b/docs/source/api/applications/ai/ensemble.rst
@@ -0,0 +1,7 @@
+tensorcircuit.applications.ai.ensemble
+================================================================================
+.. automodule:: tensorcircuit.applications.ai.ensemble
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/applications/dqas.rst b/docs/source/api/applications/dqas.rst
index 32457e1f..73cacd43 100644
--- a/docs/source/api/applications/dqas.rst
+++ b/docs/source/api/applications/dqas.rst
@@ -1,5 +1,5 @@
tensorcircuit.applications.dqas
-==================================================
+================================================================================
.. automodule:: tensorcircuit.applications.dqas
:members:
:undoc-members:
diff --git a/docs/source/api/applications/finance.rst b/docs/source/api/applications/finance.rst
new file mode 100644
index 00000000..d3302b31
--- /dev/null
+++ b/docs/source/api/applications/finance.rst
@@ -0,0 +1,4 @@
+tensorcircuit.applications.finance
+================================================================================
+.. toctree::
+ finance/portfolio.rst
\ No newline at end of file
diff --git a/docs/source/api/applications/finance/portfolio.rst b/docs/source/api/applications/finance/portfolio.rst
new file mode 100644
index 00000000..993b5754
--- /dev/null
+++ b/docs/source/api/applications/finance/portfolio.rst
@@ -0,0 +1,7 @@
+tensorcircuit.applications.finance.portfolio
+================================================================================
+.. automodule:: tensorcircuit.applications.finance.portfolio
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/applications/graphdata.rst b/docs/source/api/applications/graphdata.rst
index 22e1af13..44851513 100644
--- a/docs/source/api/applications/graphdata.rst
+++ b/docs/source/api/applications/graphdata.rst
@@ -1,5 +1,5 @@
tensorcircuit.applications.graphdata
-==================================================
+================================================================================
.. automodule:: tensorcircuit.applications.graphdata
:members:
:undoc-members:
diff --git a/docs/source/api/applications/layers.rst b/docs/source/api/applications/layers.rst
index 69303e98..d4f49e81 100644
--- a/docs/source/api/applications/layers.rst
+++ b/docs/source/api/applications/layers.rst
@@ -1,5 +1,5 @@
tensorcircuit.applications.layers
-==================================================
+================================================================================
.. automodule:: tensorcircuit.applications.layers
:members:
:undoc-members:
diff --git a/docs/source/api/applications/optimization.rst b/docs/source/api/applications/optimization.rst
new file mode 100644
index 00000000..87a0ffbb
--- /dev/null
+++ b/docs/source/api/applications/optimization.rst
@@ -0,0 +1,7 @@
+tensorcircuit.applications.optimization
+================================================================================
+.. automodule:: tensorcircuit.applications.optimization
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/applications/physics.rst b/docs/source/api/applications/physics.rst
new file mode 100644
index 00000000..98d1a2ed
--- /dev/null
+++ b/docs/source/api/applications/physics.rst
@@ -0,0 +1,5 @@
+tensorcircuit.applications.physics
+================================================================================
+.. toctree::
+ physics/baseline.rst
+ physics/fss.rst
\ No newline at end of file
diff --git a/docs/source/api/applications/physics/baseline.rst b/docs/source/api/applications/physics/baseline.rst
new file mode 100644
index 00000000..2ac581ba
--- /dev/null
+++ b/docs/source/api/applications/physics/baseline.rst
@@ -0,0 +1,7 @@
+tensorcircuit.applications.physics.baseline
+================================================================================
+.. automodule:: tensorcircuit.applications.physics.baseline
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/applications/physics/fss.rst b/docs/source/api/applications/physics/fss.rst
new file mode 100644
index 00000000..d65cd6c1
--- /dev/null
+++ b/docs/source/api/applications/physics/fss.rst
@@ -0,0 +1,7 @@
+tensorcircuit.applications.physics.fss
+================================================================================
+.. automodule:: tensorcircuit.applications.physics.fss
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/applications/utils.rst b/docs/source/api/applications/utils.rst
index d4549700..4114e7d8 100644
--- a/docs/source/api/applications/utils.rst
+++ b/docs/source/api/applications/utils.rst
@@ -1,5 +1,5 @@
tensorcircuit.applications.utils
-==================================================
+================================================================================
.. automodule:: tensorcircuit.applications.utils
:members:
:undoc-members:
diff --git a/docs/source/api/applications/vags.rst b/docs/source/api/applications/vags.rst
index 5b951bd3..af0f451f 100644
--- a/docs/source/api/applications/vags.rst
+++ b/docs/source/api/applications/vags.rst
@@ -1,5 +1,5 @@
tensorcircuit.applications.vags
-==================================================
+================================================================================
.. automodule:: tensorcircuit.applications.vags
:members:
:undoc-members:
diff --git a/docs/source/api/applications/van.rst b/docs/source/api/applications/van.rst
index 463e44d2..5c90f2e5 100644
--- a/docs/source/api/applications/van.rst
+++ b/docs/source/api/applications/van.rst
@@ -1,5 +1,5 @@
tensorcircuit.applications.van
-==================================================
+================================================================================
.. automodule:: tensorcircuit.applications.van
:members:
:undoc-members:
diff --git a/docs/source/api/applications/vqes.rst b/docs/source/api/applications/vqes.rst
index e3c775e5..d868c634 100644
--- a/docs/source/api/applications/vqes.rst
+++ b/docs/source/api/applications/vqes.rst
@@ -1,5 +1,5 @@
tensorcircuit.applications.vqes
-==================================================
+================================================================================
.. automodule:: tensorcircuit.applications.vqes
:members:
:undoc-members:
diff --git a/docs/source/api/backends.rst b/docs/source/api/backends.rst
index 4504e569..cfc63bec 100644
--- a/docs/source/api/backends.rst
+++ b/docs/source/api/backends.rst
@@ -1,5 +1,5 @@
tensorcircuit.backends
-==================================================
+================================================================================
.. toctree::
backends/backend_factory.rst
backends/cupy_backend.rst
diff --git a/docs/source/api/backends/backend_factory.rst b/docs/source/api/backends/backend_factory.rst
index 8864abfe..6df6374e 100644
--- a/docs/source/api/backends/backend_factory.rst
+++ b/docs/source/api/backends/backend_factory.rst
@@ -1,5 +1,5 @@
tensorcircuit.backends.backend_factory
-==================================================
+================================================================================
.. automodule:: tensorcircuit.backends.backend_factory
:members:
:undoc-members:
diff --git a/docs/source/api/backends/cupy_backend.rst b/docs/source/api/backends/cupy_backend.rst
index 743fe8f3..1e2421eb 100644
--- a/docs/source/api/backends/cupy_backend.rst
+++ b/docs/source/api/backends/cupy_backend.rst
@@ -1,5 +1,5 @@
tensorcircuit.backends.cupy_backend
-==================================================
+================================================================================
.. automodule:: tensorcircuit.backends.cupy_backend
:members:
:undoc-members:
diff --git a/docs/source/api/backends/jax_backend.rst b/docs/source/api/backends/jax_backend.rst
index e0dfe7c3..209409bc 100644
--- a/docs/source/api/backends/jax_backend.rst
+++ b/docs/source/api/backends/jax_backend.rst
@@ -1,5 +1,5 @@
tensorcircuit.backends.jax_backend
-==================================================
+================================================================================
.. automodule:: tensorcircuit.backends.jax_backend
:members:
:undoc-members:
diff --git a/docs/source/api/backends/numpy_backend.rst b/docs/source/api/backends/numpy_backend.rst
index af19d26b..735f969f 100644
--- a/docs/source/api/backends/numpy_backend.rst
+++ b/docs/source/api/backends/numpy_backend.rst
@@ -1,5 +1,5 @@
tensorcircuit.backends.numpy_backend
-==================================================
+================================================================================
.. automodule:: tensorcircuit.backends.numpy_backend
:members:
:undoc-members:
diff --git a/docs/source/api/backends/pytorch_backend.rst b/docs/source/api/backends/pytorch_backend.rst
index df2712c6..0d10f664 100644
--- a/docs/source/api/backends/pytorch_backend.rst
+++ b/docs/source/api/backends/pytorch_backend.rst
@@ -1,5 +1,5 @@
tensorcircuit.backends.pytorch_backend
-==================================================
+================================================================================
.. automodule:: tensorcircuit.backends.pytorch_backend
:members:
:undoc-members:
diff --git a/docs/source/api/backends/tensorflow_backend.rst b/docs/source/api/backends/tensorflow_backend.rst
index 52663b1a..a595418e 100644
--- a/docs/source/api/backends/tensorflow_backend.rst
+++ b/docs/source/api/backends/tensorflow_backend.rst
@@ -1,5 +1,5 @@
tensorcircuit.backends.tensorflow_backend
-==================================================
+================================================================================
.. automodule:: tensorcircuit.backends.tensorflow_backend
:members:
:undoc-members:
diff --git a/docs/source/api/basecircuit.rst b/docs/source/api/basecircuit.rst
index 6b014bb1..79c2636e 100644
--- a/docs/source/api/basecircuit.rst
+++ b/docs/source/api/basecircuit.rst
@@ -1,5 +1,5 @@
tensorcircuit.basecircuit
-==================================================
+================================================================================
.. automodule:: tensorcircuit.basecircuit
:members:
:undoc-members:
diff --git a/docs/source/api/channels.rst b/docs/source/api/channels.rst
index 3a7cf3af..d9d6fd00 100644
--- a/docs/source/api/channels.rst
+++ b/docs/source/api/channels.rst
@@ -1,5 +1,5 @@
tensorcircuit.channels
-==================================================
+================================================================================
.. automodule:: tensorcircuit.channels
:members:
:undoc-members:
diff --git a/docs/source/api/circuit.rst b/docs/source/api/circuit.rst
index 910c5ef6..59c76ddd 100644
--- a/docs/source/api/circuit.rst
+++ b/docs/source/api/circuit.rst
@@ -1,5 +1,5 @@
tensorcircuit.circuit
-==================================================
+================================================================================
.. automodule:: tensorcircuit.circuit
:members:
:undoc-members:
diff --git a/docs/source/api/cloud.rst b/docs/source/api/cloud.rst
index 65b9c714..be2faf7d 100644
--- a/docs/source/api/cloud.rst
+++ b/docs/source/api/cloud.rst
@@ -1,5 +1,5 @@
tensorcircuit.cloud
-==================================================
+================================================================================
.. toctree::
cloud/abstraction.rst
cloud/apis.rst
diff --git a/docs/source/api/cloud/abstraction.rst b/docs/source/api/cloud/abstraction.rst
index b0a50812..3f00247c 100644
--- a/docs/source/api/cloud/abstraction.rst
+++ b/docs/source/api/cloud/abstraction.rst
@@ -1,5 +1,5 @@
tensorcircuit.cloud.abstraction
-==================================================
+================================================================================
.. automodule:: tensorcircuit.cloud.abstraction
:members:
:undoc-members:
diff --git a/docs/source/api/cloud/apis.rst b/docs/source/api/cloud/apis.rst
index 648baa66..fe623eec 100644
--- a/docs/source/api/cloud/apis.rst
+++ b/docs/source/api/cloud/apis.rst
@@ -1,5 +1,5 @@
tensorcircuit.cloud.apis
-==================================================
+================================================================================
.. automodule:: tensorcircuit.cloud.apis
:members:
:undoc-members:
diff --git a/docs/source/api/cloud/config.rst b/docs/source/api/cloud/config.rst
index a4338784..8f6282a0 100644
--- a/docs/source/api/cloud/config.rst
+++ b/docs/source/api/cloud/config.rst
@@ -1,5 +1,5 @@
tensorcircuit.cloud.config
-==================================================
+================================================================================
.. automodule:: tensorcircuit.cloud.config
:members:
:undoc-members:
diff --git a/docs/source/api/cloud/local.rst b/docs/source/api/cloud/local.rst
index c3ae767d..649f66d6 100644
--- a/docs/source/api/cloud/local.rst
+++ b/docs/source/api/cloud/local.rst
@@ -1,5 +1,5 @@
tensorcircuit.cloud.local
-==================================================
+================================================================================
.. automodule:: tensorcircuit.cloud.local
:members:
:undoc-members:
diff --git a/docs/source/api/cloud/quafu_provider.rst b/docs/source/api/cloud/quafu_provider.rst
index eae158af..06d15eee 100644
--- a/docs/source/api/cloud/quafu_provider.rst
+++ b/docs/source/api/cloud/quafu_provider.rst
@@ -1,5 +1,5 @@
tensorcircuit.cloud.quafu_provider
-==================================================
+================================================================================
.. automodule:: tensorcircuit.cloud.quafu_provider
:members:
:undoc-members:
diff --git a/docs/source/api/cloud/tencent.rst b/docs/source/api/cloud/tencent.rst
index b38a7183..431c3294 100644
--- a/docs/source/api/cloud/tencent.rst
+++ b/docs/source/api/cloud/tencent.rst
@@ -1,5 +1,5 @@
tensorcircuit.cloud.tencent
-==================================================
+================================================================================
.. automodule:: tensorcircuit.cloud.tencent
:members:
:undoc-members:
diff --git a/docs/source/api/cloud/utils.rst b/docs/source/api/cloud/utils.rst
index 190ea5a4..a7e33fe4 100644
--- a/docs/source/api/cloud/utils.rst
+++ b/docs/source/api/cloud/utils.rst
@@ -1,5 +1,5 @@
tensorcircuit.cloud.utils
-==================================================
+================================================================================
.. automodule:: tensorcircuit.cloud.utils
:members:
:undoc-members:
diff --git a/docs/source/api/cloud/wrapper.rst b/docs/source/api/cloud/wrapper.rst
index bac6a502..d65d3c07 100644
--- a/docs/source/api/cloud/wrapper.rst
+++ b/docs/source/api/cloud/wrapper.rst
@@ -1,5 +1,5 @@
tensorcircuit.cloud.wrapper
-==================================================
+================================================================================
.. automodule:: tensorcircuit.cloud.wrapper
:members:
:undoc-members:
diff --git a/docs/source/api/compiler.rst b/docs/source/api/compiler.rst
index c83bc2bd..cb47419f 100644
--- a/docs/source/api/compiler.rst
+++ b/docs/source/api/compiler.rst
@@ -1,5 +1,5 @@
tensorcircuit.compiler
-==================================================
+================================================================================
.. toctree::
compiler/composed_compiler.rst
compiler/qiskit_compiler.rst
diff --git a/docs/source/api/compiler/composed_compiler.rst b/docs/source/api/compiler/composed_compiler.rst
index c856636d..07f7f23e 100644
--- a/docs/source/api/compiler/composed_compiler.rst
+++ b/docs/source/api/compiler/composed_compiler.rst
@@ -1,5 +1,5 @@
tensorcircuit.compiler.composed_compiler
-==================================================
+================================================================================
.. automodule:: tensorcircuit.compiler.composed_compiler
:members:
:undoc-members:
diff --git a/docs/source/api/compiler/qiskit_compiler.rst b/docs/source/api/compiler/qiskit_compiler.rst
index 369b4740..b46ae8dc 100644
--- a/docs/source/api/compiler/qiskit_compiler.rst
+++ b/docs/source/api/compiler/qiskit_compiler.rst
@@ -1,5 +1,5 @@
tensorcircuit.compiler.qiskit_compiler
-==================================================
+================================================================================
.. automodule:: tensorcircuit.compiler.qiskit_compiler
:members:
:undoc-members:
diff --git a/docs/source/api/compiler/simple_compiler.rst b/docs/source/api/compiler/simple_compiler.rst
index 0578d8e5..941efba5 100644
--- a/docs/source/api/compiler/simple_compiler.rst
+++ b/docs/source/api/compiler/simple_compiler.rst
@@ -1,5 +1,5 @@
tensorcircuit.compiler.simple_compiler
-==================================================
+================================================================================
.. automodule:: tensorcircuit.compiler.simple_compiler
:members:
:undoc-members:
diff --git a/docs/source/api/cons.rst b/docs/source/api/cons.rst
index 6e077058..d4f48ab6 100644
--- a/docs/source/api/cons.rst
+++ b/docs/source/api/cons.rst
@@ -1,5 +1,5 @@
tensorcircuit.cons
-==================================================
+================================================================================
.. automodule:: tensorcircuit.cons
:members:
:undoc-members:
diff --git a/docs/source/api/densitymatrix.rst b/docs/source/api/densitymatrix.rst
index 571647d2..274dc323 100644
--- a/docs/source/api/densitymatrix.rst
+++ b/docs/source/api/densitymatrix.rst
@@ -1,5 +1,5 @@
tensorcircuit.densitymatrix
-==================================================
+================================================================================
.. automodule:: tensorcircuit.densitymatrix
:members:
:undoc-members:
diff --git a/docs/source/api/experimental.rst b/docs/source/api/experimental.rst
index 16761d4c..dbdfa068 100644
--- a/docs/source/api/experimental.rst
+++ b/docs/source/api/experimental.rst
@@ -1,5 +1,5 @@
tensorcircuit.experimental
-==================================================
+================================================================================
.. automodule:: tensorcircuit.experimental
:members:
:undoc-members:
diff --git a/docs/source/api/fgs.rst b/docs/source/api/fgs.rst
new file mode 100644
index 00000000..f00001b4
--- /dev/null
+++ b/docs/source/api/fgs.rst
@@ -0,0 +1,7 @@
+tensorcircuit.fgs
+================================================================================
+.. automodule:: tensorcircuit.fgs
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/gates.rst b/docs/source/api/gates.rst
index 8f72fbcc..71428553 100644
--- a/docs/source/api/gates.rst
+++ b/docs/source/api/gates.rst
@@ -1,5 +1,5 @@
tensorcircuit.gates
-==================================================
+================================================================================
.. automodule:: tensorcircuit.gates
:members:
:undoc-members:
diff --git a/docs/source/api/interfaces.rst b/docs/source/api/interfaces.rst
index 6371d824..5b234d0f 100644
--- a/docs/source/api/interfaces.rst
+++ b/docs/source/api/interfaces.rst
@@ -1,5 +1,5 @@
tensorcircuit.interfaces
-==================================================
+================================================================================
.. toctree::
interfaces/numpy.rst
interfaces/scipy.rst
diff --git a/docs/source/api/interfaces/numpy.rst b/docs/source/api/interfaces/numpy.rst
index 5df8b0bb..5271b873 100644
--- a/docs/source/api/interfaces/numpy.rst
+++ b/docs/source/api/interfaces/numpy.rst
@@ -1,5 +1,5 @@
tensorcircuit.interfaces.numpy
-==================================================
+================================================================================
.. automodule:: tensorcircuit.interfaces.numpy
:members:
:undoc-members:
diff --git a/docs/source/api/interfaces/scipy.rst b/docs/source/api/interfaces/scipy.rst
index c263bd93..284dcbe9 100644
--- a/docs/source/api/interfaces/scipy.rst
+++ b/docs/source/api/interfaces/scipy.rst
@@ -1,5 +1,5 @@
tensorcircuit.interfaces.scipy
-==================================================
+================================================================================
.. automodule:: tensorcircuit.interfaces.scipy
:members:
:undoc-members:
diff --git a/docs/source/api/interfaces/tensorflow.rst b/docs/source/api/interfaces/tensorflow.rst
index e02981b9..8ac1a344 100644
--- a/docs/source/api/interfaces/tensorflow.rst
+++ b/docs/source/api/interfaces/tensorflow.rst
@@ -1,5 +1,5 @@
tensorcircuit.interfaces.tensorflow
-==================================================
+================================================================================
.. automodule:: tensorcircuit.interfaces.tensorflow
:members:
:undoc-members:
diff --git a/docs/source/api/interfaces/tensortrans.rst b/docs/source/api/interfaces/tensortrans.rst
index b666e177..a92b166d 100644
--- a/docs/source/api/interfaces/tensortrans.rst
+++ b/docs/source/api/interfaces/tensortrans.rst
@@ -1,5 +1,5 @@
tensorcircuit.interfaces.tensortrans
-==================================================
+================================================================================
.. automodule:: tensorcircuit.interfaces.tensortrans
:members:
:undoc-members:
diff --git a/docs/source/api/interfaces/torch.rst b/docs/source/api/interfaces/torch.rst
index 28090f54..5f7e3dea 100644
--- a/docs/source/api/interfaces/torch.rst
+++ b/docs/source/api/interfaces/torch.rst
@@ -1,5 +1,5 @@
tensorcircuit.interfaces.torch
-==================================================
+================================================================================
.. automodule:: tensorcircuit.interfaces.torch
:members:
:undoc-members:
diff --git a/docs/source/api/keras.rst b/docs/source/api/keras.rst
index 5ed313b2..9f2e4860 100644
--- a/docs/source/api/keras.rst
+++ b/docs/source/api/keras.rst
@@ -1,5 +1,5 @@
tensorcircuit.keras
-==================================================
+================================================================================
.. automodule:: tensorcircuit.keras
:members:
:undoc-members:
diff --git a/docs/source/api/mps_base.rst b/docs/source/api/mps_base.rst
index caf11b36..039da259 100644
--- a/docs/source/api/mps_base.rst
+++ b/docs/source/api/mps_base.rst
@@ -1,5 +1,5 @@
tensorcircuit.mps_base
-==================================================
+================================================================================
.. automodule:: tensorcircuit.mps_base
:members:
:undoc-members:
diff --git a/docs/source/api/mpscircuit.rst b/docs/source/api/mpscircuit.rst
index a4de8119..58a68f56 100644
--- a/docs/source/api/mpscircuit.rst
+++ b/docs/source/api/mpscircuit.rst
@@ -1,5 +1,5 @@
tensorcircuit.mpscircuit
-==================================================
+================================================================================
.. automodule:: tensorcircuit.mpscircuit
:members:
:undoc-members:
diff --git a/docs/source/api/noisemodel.rst b/docs/source/api/noisemodel.rst
index ab152857..4930d8f0 100644
--- a/docs/source/api/noisemodel.rst
+++ b/docs/source/api/noisemodel.rst
@@ -1,5 +1,5 @@
tensorcircuit.noisemodel
-==================================================
+================================================================================
.. automodule:: tensorcircuit.noisemodel
:members:
:undoc-members:
diff --git a/docs/source/api/quantum.rst b/docs/source/api/quantum.rst
index c9d13b6b..f25c8a5d 100644
--- a/docs/source/api/quantum.rst
+++ b/docs/source/api/quantum.rst
@@ -1,5 +1,5 @@
tensorcircuit.quantum
-==================================================
+================================================================================
.. automodule:: tensorcircuit.quantum
:members:
:undoc-members:
diff --git a/docs/source/api/results.rst b/docs/source/api/results.rst
index 0bea95e7..2e60327c 100644
--- a/docs/source/api/results.rst
+++ b/docs/source/api/results.rst
@@ -1,5 +1,6 @@
tensorcircuit.results
-==================================================
+================================================================================
.. toctree::
results/counts.rst
+ results/qem.rst
results/readout_mitigation.rst
\ No newline at end of file
diff --git a/docs/source/api/results/counts.rst b/docs/source/api/results/counts.rst
index 7542d722..7f145206 100644
--- a/docs/source/api/results/counts.rst
+++ b/docs/source/api/results/counts.rst
@@ -1,5 +1,5 @@
tensorcircuit.results.counts
-==================================================
+================================================================================
.. automodule:: tensorcircuit.results.counts
:members:
:undoc-members:
diff --git a/docs/source/api/results/qem.rst b/docs/source/api/results/qem.rst
new file mode 100644
index 00000000..160098f7
--- /dev/null
+++ b/docs/source/api/results/qem.rst
@@ -0,0 +1,5 @@
+tensorcircuit.results.qem
+================================================================================
+.. toctree::
+ qem/benchmark_circuits.rst
+ qem/qem_methods.rst
\ No newline at end of file
diff --git a/docs/source/api/results/qem/benchmark_circuits.rst b/docs/source/api/results/qem/benchmark_circuits.rst
new file mode 100644
index 00000000..3c339884
--- /dev/null
+++ b/docs/source/api/results/qem/benchmark_circuits.rst
@@ -0,0 +1,7 @@
+tensorcircuit.results.qem.benchmark_circuits
+================================================================================
+.. automodule:: tensorcircuit.results.qem.benchmark_circuits
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/results/qem/qem_methods.rst b/docs/source/api/results/qem/qem_methods.rst
new file mode 100644
index 00000000..a95bdf95
--- /dev/null
+++ b/docs/source/api/results/qem/qem_methods.rst
@@ -0,0 +1,7 @@
+tensorcircuit.results.qem.qem_methods
+================================================================================
+.. automodule:: tensorcircuit.results.qem.qem_methods
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/results/readout_mitigation.rst b/docs/source/api/results/readout_mitigation.rst
index 0d9baa3d..325fe21a 100644
--- a/docs/source/api/results/readout_mitigation.rst
+++ b/docs/source/api/results/readout_mitigation.rst
@@ -1,5 +1,5 @@
tensorcircuit.results.readout_mitigation
-==================================================
+================================================================================
.. automodule:: tensorcircuit.results.readout_mitigation
:members:
:undoc-members:
diff --git a/docs/source/api/shadows.rst b/docs/source/api/shadows.rst
new file mode 100644
index 00000000..7aea082e
--- /dev/null
+++ b/docs/source/api/shadows.rst
@@ -0,0 +1,7 @@
+tensorcircuit.shadows
+================================================================================
+.. automodule:: tensorcircuit.shadows
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/simplify.rst b/docs/source/api/simplify.rst
index c1816c31..22833f9f 100644
--- a/docs/source/api/simplify.rst
+++ b/docs/source/api/simplify.rst
@@ -1,5 +1,5 @@
tensorcircuit.simplify
-==================================================
+================================================================================
.. automodule:: tensorcircuit.simplify
:members:
:undoc-members:
diff --git a/docs/source/api/templates.rst b/docs/source/api/templates.rst
index 330fa6db..202b049d 100644
--- a/docs/source/api/templates.rst
+++ b/docs/source/api/templates.rst
@@ -1,9 +1,10 @@
tensorcircuit.templates
-==================================================
+================================================================================
.. toctree::
+ templates/ansatz.rst
templates/blocks.rst
templates/chems.rst
+ templates/conversions.rst
templates/dataset.rst
- templates/ensemble.rst
templates/graphs.rst
templates/measurements.rst
\ No newline at end of file
diff --git a/docs/source/api/templates/ansatz.rst b/docs/source/api/templates/ansatz.rst
new file mode 100644
index 00000000..15f19650
--- /dev/null
+++ b/docs/source/api/templates/ansatz.rst
@@ -0,0 +1,7 @@
+tensorcircuit.templates.ansatz
+================================================================================
+.. automodule:: tensorcircuit.templates.ansatz
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/templates/blocks.rst b/docs/source/api/templates/blocks.rst
index 0c88f3d9..b7a0945a 100644
--- a/docs/source/api/templates/blocks.rst
+++ b/docs/source/api/templates/blocks.rst
@@ -1,5 +1,5 @@
tensorcircuit.templates.blocks
-==================================================
+================================================================================
.. automodule:: tensorcircuit.templates.blocks
:members:
:undoc-members:
diff --git a/docs/source/api/templates/chems.rst b/docs/source/api/templates/chems.rst
index 8a31f9d3..d06d9e39 100644
--- a/docs/source/api/templates/chems.rst
+++ b/docs/source/api/templates/chems.rst
@@ -1,5 +1,5 @@
tensorcircuit.templates.chems
-==================================================
+================================================================================
.. automodule:: tensorcircuit.templates.chems
:members:
:undoc-members:
diff --git a/docs/source/api/templates/conversions.rst b/docs/source/api/templates/conversions.rst
new file mode 100644
index 00000000..38cbe47f
--- /dev/null
+++ b/docs/source/api/templates/conversions.rst
@@ -0,0 +1,7 @@
+tensorcircuit.templates.conversions
+================================================================================
+.. automodule:: tensorcircuit.templates.conversions
+ :members:
+ :undoc-members:
+ :show-inheritance:
+ :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/templates/dataset.rst b/docs/source/api/templates/dataset.rst
index 36b9e510..aa6cdfa7 100644
--- a/docs/source/api/templates/dataset.rst
+++ b/docs/source/api/templates/dataset.rst
@@ -1,5 +1,5 @@
tensorcircuit.templates.dataset
-==================================================
+================================================================================
.. automodule:: tensorcircuit.templates.dataset
:members:
:undoc-members:
diff --git a/docs/source/api/templates/ensemble.rst b/docs/source/api/templates/ensemble.rst
deleted file mode 100644
index c7dd6f85..00000000
--- a/docs/source/api/templates/ensemble.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-tensorcircuit.templates.ensemble
-==================================================
-.. automodule:: tensorcircuit.templates.ensemble
- :members:
- :undoc-members:
- :show-inheritance:
- :inherited-members:
\ No newline at end of file
diff --git a/docs/source/api/templates/graphs.rst b/docs/source/api/templates/graphs.rst
index 0a2141f0..b86ab51e 100644
--- a/docs/source/api/templates/graphs.rst
+++ b/docs/source/api/templates/graphs.rst
@@ -1,5 +1,5 @@
tensorcircuit.templates.graphs
-==================================================
+================================================================================
.. automodule:: tensorcircuit.templates.graphs
:members:
:undoc-members:
diff --git a/docs/source/api/templates/measurements.rst b/docs/source/api/templates/measurements.rst
index 2113f03b..7e05673c 100644
--- a/docs/source/api/templates/measurements.rst
+++ b/docs/source/api/templates/measurements.rst
@@ -1,5 +1,5 @@
tensorcircuit.templates.measurements
-==================================================
+================================================================================
.. automodule:: tensorcircuit.templates.measurements
:members:
:undoc-members:
diff --git a/docs/source/api/torchnn.rst b/docs/source/api/torchnn.rst
index 5a5b2775..9f9c6598 100644
--- a/docs/source/api/torchnn.rst
+++ b/docs/source/api/torchnn.rst
@@ -1,5 +1,5 @@
tensorcircuit.torchnn
-==================================================
+================================================================================
.. automodule:: tensorcircuit.torchnn
:members:
:undoc-members:
diff --git a/docs/source/api/translation.rst b/docs/source/api/translation.rst
index a33667f7..f320c909 100644
--- a/docs/source/api/translation.rst
+++ b/docs/source/api/translation.rst
@@ -1,5 +1,5 @@
tensorcircuit.translation
-==================================================
+================================================================================
.. automodule:: tensorcircuit.translation
:members:
:undoc-members:
diff --git a/docs/source/api/utils.rst b/docs/source/api/utils.rst
index 3fa45319..93ee9496 100644
--- a/docs/source/api/utils.rst
+++ b/docs/source/api/utils.rst
@@ -1,5 +1,5 @@
tensorcircuit.utils
-==================================================
+================================================================================
.. automodule:: tensorcircuit.utils
:members:
:undoc-members:
diff --git a/docs/source/api/vis.rst b/docs/source/api/vis.rst
index f27680f1..2cdc89e2 100644
--- a/docs/source/api/vis.rst
+++ b/docs/source/api/vis.rst
@@ -1,5 +1,5 @@
tensorcircuit.vis
-==================================================
+================================================================================
.. automodule:: tensorcircuit.vis
:members:
:undoc-members:
diff --git a/docs/source/cnconf.py b/docs/source/cnconf.py
index 8b01d026..ceecf794 100644
--- a/docs/source/cnconf.py
+++ b/docs/source/cnconf.py
@@ -48,7 +48,9 @@
"sphinx_copybutton",
"nbsphinx",
"toctree_filter",
+ "sphinx.ext.napoleon",
"myst_parser",
+ "sphinx_design",
]
autosectionlabel_prefix_document = True
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 181dda0e..9d8147d9 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -50,6 +50,7 @@
"toctree_filter",
"sphinx.ext.napoleon",
"myst_parser",
+ "sphinx_design",
]
nbsphinx_allow_errors = True
diff --git a/docs/source/contribs/development_Mac.md b/docs/source/contribs/development_Mac.md
new file mode 100644
index 00000000..b2682f32
--- /dev/null
+++ b/docs/source/contribs/development_Mac.md
@@ -0,0 +1,113 @@
+# Tensorcircuit Installation Guide on MacOS
+
+Contributed by [_Mark (Zixuan) Song_](https://marksong.tech)
+
+Apple has updated Tensorflow (for MacOS) so that installation on M-series (until M2) and Intel-series Mac can follow the exact same procedure.
+
+## Starting From Scratch
+
+For a completely new MacOS or a MacOS without Xcode installed.
+
+If you have Xcode installed, skip to Install TC backends.
+
+### Install Xcode Command Line Tools
+
+Need graphical access to the machine.
+
+Run `xcode-select --install` to install if on optimal internet.
+
+Or Download it from [Apple](https://developer.apple.com/download/more/) Command Line Tools installation image then install it if the internet connection is weak.
+
+## Install TC Backends
+
+There are four backends to choose from, Numpy, Tensorflow, Jax, and Torch.
+
+### Install Jax, Pytorch (Optional)
+
+```bash
+pip install [Package Name]
+```
+### Install Tensorflow (Optional - Recommended)
+
+#### Install Miniconda (Optional - Recommended)
+
+If you wish to install Tensorflow optimized for MacOS (`tensorflow-macos`) or Tensorflow GPU optimized (`tensorflow-metal`) please install miniconda.
+
+If you wish to install Vanilla Tensorflow developed by Google (`tensorflow`) please skip this step.
+
+```bash
+curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh
+bash ~/miniconda.sh -b -p $HOME/miniconda
+source ~/miniconda/bin/activate
+conda install -c apple tensorflow-deps
+```
+
+#### Installation
+
+```bash
+pip install tensorflow
+```
+
+If you wish to use tensorflow-metal PluggableDevice, then continue install (not recommended):
+
+```bash
+pip install tensorflow-metal
+```
+
+#### Verify Tensorflow Installation
+
+```python
+import tensorflow as tf
+
+cifar = tf.keras.datasets.cifar100
+(x_train, y_train), (x_test, y_test) = cifar.load_data()
+model = tf.keras.applications.ResNet50(
+ include_top=True,
+ weights=None,
+ input_shape=(32, 32, 3),
+ classes=100,)
+
+loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
+model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
+model.fit(x_train, y_train, epochs=5, batch_size=64)
+```
+
+## Install Tensorcircuit
+
+```bash
+pip install tensorcircuit
+```
+
+## Benchmarking
+
+This data is collected by running `benchmarks/scripts/vqe_tc.py` 10 times and averaging the results.
+
+
+
+ |
+ Vanilla Tensorflow |
+ Apple Tensorflow |
+ Apple Tensorflow with Metal Plugin |
+
+
+ Construction Time |
+ 11.49241641s |
+ 11.31878941s |
+ 11.6103961s |
+
+
+ Iteration time |
+ 0.002313011s |
+ 0.002333004s |
+ 0.046412581s |
+
+
+ Total time |
+ 11.72371747s |
+ 11.55208979s |
+ 16.25165417s |
+
+
+
+
+Until July 2023, this has been tested on Intel Macs running Ventura, M1 Macs running Ventura, M2 Macs running Ventura, and M2 Macs running Sonoma beta.
\ No newline at end of file
diff --git a/docs/source/contribs/development_MacARM.md b/docs/source/contribs/development_MacARM.md
index ffddf582..73c63948 100644
--- a/docs/source/contribs/development_MacARM.md
+++ b/docs/source/contribs/development_MacARM.md
@@ -2,6 +2,9 @@
Contributed by Mark (Zixuan) Song
+.. warning::
+ This page is deprecated. Please visit `the update tutorial `_ for the latest information.
+
## Starting From Scratch
For completely new macos or macos without xcode and brew
@@ -43,13 +46,7 @@ pip install [Package Name]
### Install Tensorflow (Optional)
-#### Install Tensorflow (Recommended Approach)
-
-❗️ Tensorflow with MacOS optimization would not function correctly in version 2.11.0 and before. Do not use this version of tensorflow if you intented to train any machine learning model.
-
-FYI: Error can occur when machine learning training or gpu related code is involved.
-
-⚠️ Tensorflow without macos optimization does not support Metal API and utilizing GPU (both intel chips and M-series chips) until at least tensorflow 2.11. Tensorflow-macos would fail when running `tc.backend.to_dense()`
+#### Install Tensorflow without MacOS optimization
```
conda config --add channels conda-forge
@@ -75,13 +72,45 @@ model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
model.fit(x_train, y_train, epochs=5, batch_size=64)
```
+#### Install Tensorflow with MacOS optimization (Recommended)
+
+For tensorflow version 2.13 or later:
+```
+pip install tensorflow
+pip install tensorflow-metal
+```
+
+For tensorflow version 2.12 or earlier:
+```
+pip install tensorflow-macos
+pip install tensorflow-metal
+```
+
+#### Verify Tensorflow Installation
+
+```
+import tensorflow as tf
+
+cifar = tf.keras.datasets.cifar100
+(x_train, y_train), (x_test, y_test) = cifar.load_data()
+model = tf.keras.applications.ResNet50(
+ include_top=True,
+ weights=None,
+ input_shape=(32, 32, 3),
+ classes=100,)
+
+loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
+model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
+model.fit(x_train, y_train, epochs=5, batch_size=64)
+```
+
## Install Tensorcircuit
```
pip install tensorcircuit
```
-Testing Platform (Tested Feb 2023)
+Testing Platform (Tested Jun 2023)
- Platform 1:
- MacOS Ventura 13.1 (Build version 22C65)
@@ -89,3 +118,6 @@ Testing Platform (Tested Feb 2023)
- Platform 2:
- MacOS Ventura 13.2 (Build version 22D49)
- M1 Ultra (Virtual)
+- Platform 4:
+ - MacOS Sonoma 14.0 Beta 2 (Build version 23A5276g)
+ - M2 Max
\ No newline at end of file
diff --git a/docs/source/contribs/development_MacM1.rst b/docs/source/contribs/development_MacM1.rst
index 3df9c949..8ce9f058 100644
--- a/docs/source/contribs/development_MacM1.rst
+++ b/docs/source/contribs/development_MacM1.rst
@@ -4,7 +4,7 @@ Contributed by (Yuqin Chen)
.. warning::
- This page is deprecated. Please visit `the update tutorial `_ for latest information.
+ This page is deprecated. Please visit `the update tutorial `_ for the latest information.
Why We Can't Run TensorCircuit on TensorlowBackend with Apple M1
diff --git a/docs/source/contribs/development_MacM2.md b/docs/source/contribs/development_MacM2.md
new file mode 100644
index 00000000..b3daf5fb
--- /dev/null
+++ b/docs/source/contribs/development_MacM2.md
@@ -0,0 +1,53 @@
+# Tensorcircuit Installation Guide on MacOS
+
+Contributed by [Hong-Ye Hu](https://github.com/hongyehu)
+
+.. warning::
+ This page is deprecated. Please visit `the update tutorial `_ for the latest information.
+
+The key issue addressed in this document is **how to install both TensorFlow and Jax on an M2 chip MacOS without conflict**.
+
+## Starting From Scratch
+
+### Install Xcode Command Line Tools
+
+Need graphical access to the machine.
+
+Run `xcode-select --install` to install if on optimal internet.
+
+Or Download from [Apple](https://developer.apple.com/download/more/) Command Line Tools installation image then install if internet connection is weak.
+
+## Install Miniconda
+
+Due to the limitation of MacOS and packages, the latest version of python does not always function as desired, thus miniconda installation is advised to solve the issues. And using an anaconda virtual environment is always a good habit.
+
+```
+curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh
+bash ~/miniconda.sh -b -p $HOME/miniconda
+source ~/miniconda/bin/activate
+```
+
+## Install Packages
+First, create a virtual environment, and make sure the python version is 3.8.5 by
+```
+conda create --name NewEnv python==3.8.5
+conda activate NewEnv
+```
+Then, install the TensorFlow from `.whl` file (file can be downloaded from this [URL](https://drive.google.com/drive/folders/1oSipZLnoeQB0Awz8U68KYeCPsULy_dQ7)). This will install TensorFlow version 2.4.1
+```
+pip install ~/Downloads/tensorflow-2.4.1-py3-none-any.whl
+```
+Next, one needs to install **Jax** and **Optax** by
+```
+conda install jax==0.3.0
+conda install optax==0.1.4
+```
+Now, hopefully, you should be able to use both Jax and TensorFlow in this environment. But sometimes, it may give you an error "ERROR: package Chardet not found.".
+If that is the case, you can install it by `conda install chardet`.
+Lastly, install tensorcircuit
+```
+pip install tensorcircuit
+```
+This is the solution that seems to work for M2-chip MacOS. Please let me know if there is a better solution!
+
+
diff --git a/docs/source/contribs/development_Mac_cn.md b/docs/source/contribs/development_Mac_cn.md
new file mode 100644
index 00000000..f23fd01f
--- /dev/null
+++ b/docs/source/contribs/development_Mac_cn.md
@@ -0,0 +1,114 @@
+# MacOS Tensorcircuit 安装教程
+
+[_Mark (Zixuan) Song_](https://marksong.tech) 撰写
+
+由于苹果更新了Tensorflow,因此M系列(直到M2)和英特尔系列Mac上的安装可以遵循完全相同的过程。
+
+## 从头开始
+
+对于全新的Macos或未安装Xcode的Macos。
+
+若您已安装Xcode,请跳转到安装TC后端。
+
+### 安装Xcode命令行工具
+
+需要对机器的图形访问
+
+如果网络良好,请运行`xcode-select --install`进行安装。
+
+或者,如果网络连接不理想,请从[苹果](https://developer.apple.com/download/more/)下载命令行工具安装映像,然后进行安装。
+
+## 安装TC后端
+
+有四个后端可供选择,Numpy,Tensorflow,Jax和Torch。
+
+### 安装Jax、Pytorch(可选)
+
+```bash
+pip install [Package Name]
+```
+
+### 安装Tensorflow(可选 - 推荐)
+
+#### 安装miniconda(可选 - 推荐)
+
+若您希望使用苹果为MacOS优化的Tensorflow(`tensorflow-macos`)或使用Tensorflow GPU优化(`tensorflow-metal`)请安装miniconda。
+
+若您希望使用Google开发的原版Tensorflow(`tensorflow`)请跳过此步骤。
+
+```bash
+curl -o ~/miniconda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh
+bash ~/miniconda.sh -b -p $HOME/miniconda
+source ~/miniconda/bin/activate
+conda install -c apple tensorflow-deps
+```
+
+#### 安装步骤
+
+```bash
+pip install tensorflow
+```
+
+若您希望使用苹果为Tensorflow优化的Metal后端,请继续运行(不建议):
+
+```bash
+pip install tensorflow-metal
+```
+
+#### 验证Tensorflow安装
+
+```python
+import tensorflow as tf
+
+cifar = tf.keras.datasets.cifar100
+(x_train, y_train), (x_test, y_test) = cifar.load_data()
+model = tf.keras.applications.ResNet50(
+ include_top=True,
+ weights=None,
+ input_shape=(32, 32, 3),
+ classes=100,)
+
+loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
+model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
+model.fit(x_train, y_train, epochs=5, batch_size=64)
+```
+
+## 安装Tensorcircuit
+
+```bash
+pip install tensorcircuit
+```
+
+## 测试与比较
+
+以下数据由运行`benchmarks/scripts/vqe_tc.py` 10次并取平均值得到。
+
+
+
+ |
+ 原版Tensorflow |
+ 苹果优化版Tensorflow |
+ 苹果优化版Tensorflow并安装Tensorflow Metal插件 |
+
+
+ 构建时间 |
+ 11.49241641s |
+ 11.31878941s |
+ 11.6103961s |
+
+
+ 迭代时间 |
+ 0.002313011s |
+ 0.002333004s |
+ 0.046412581s |
+
+
+ 总时间 |
+ 11.72371747s |
+ 11.55208979s |
+ 16.25165417s |
+
+
+
+
+直到2023年7月,这已在运行Ventura的英特尔i9 Mac、运行Ventura的M1 Mac、运行Ventura的M2 Mac、运行Sonoma测试版的M2 Mac上进行了测试。
\ No newline at end of file
diff --git a/docs/source/contribution.rst b/docs/source/contribution.rst
index 5e8d7385..d8dbd493 100644
--- a/docs/source/contribution.rst
+++ b/docs/source/contribution.rst
@@ -146,7 +146,59 @@ We use `sphinx `__ to manage the document
The source files for docs are .rst file in docs/source.
-For English docs, ``sphinx-build source build/html`` in docs dir is enough. The html version of the docs are in docs/build/html.
+For English docs, ``sphinx-build source build/html`` and ``make latexpdf LATEXMKOPTS="-silent"`` in docs dir are enough.
+The html and pdf version of the docs are in docs/build/html and docs/build/latex, respectively.
+
+**Formula Environment Attention**
+
+It should be noted that the formula environment ``$$CONTENT$$`` in markdown is equivalent to the ``equation`` environment in latex.
+Therefore, in the jupyter notebook documents, do not nest the formula environment in ``$$CONTENT$$`` that is incompatible with
+``equation`` in latex, such as ``eqnarray``, which will cause errors in the pdf file built by ``nbsphinx``.
+However, compatible formula environments can be used. For example, this legal code in markdown
+
+.. code-block:: markdown
+
+ $$
+ \begin{split}
+ X&=Y\\
+ &=Z
+ \end{split}
+ $$
+
+will be converted to
+
+.. code-block:: latex
+
+ \begin{equation}
+ \begin{split}
+ X&=Y\\
+ &=Z
+ \end{split}
+ \end{equation}
+
+in latex automatically by ``nbsphinx``, which is a legal latex code. However, this legal code in markdown
+
+.. code-block:: markdown
+
+ $$
+ \begin{eqnarray}
+ X&=&Y\\
+ &=&Z
+ \end{eqnarray}
+ $$
+
+will be converted to
+
+.. code-block:: latex
+
+ \begin{equation}
+ \begin{eqnarray}
+ X&=&Y\\
+ &=&Z
+ \end{eqnarray}
+ \end{equation}
+
+in latex, which is an illegal latex code.
**Auto Generation of API Docs:**
diff --git a/docs/source/generate_rst.py b/docs/source/generate_rst.py
index 60fcf0de..9b112c46 100644
--- a/docs/source/generate_rst.py
+++ b/docs/source/generate_rst.py
@@ -5,7 +5,7 @@
class RSTGenerator:
- title_line = "=" * 50
+ title_line = "=" * 80
toctree = ".. toctree::\n {}"
automodule = ".. automodule:: {}\n :members:\n :undoc-members:\n :show-inheritance:\n :inherited-members:"
@@ -21,11 +21,14 @@ def __init__(
def cleanup(self):
if os.path.exists("modules.rst"):
os.remove("modules.rst")
- shutil.rmtree(self.dfolder)
+ try:
+ shutil.rmtree(self.dfolder)
+ except FileNotFoundError:
+ pass
os.makedirs(self.dfolder)
def write(self, path, content):
- if type(content) == type([]):
+ if isinstance(content, list):
content = "\n".join(content)
with open(path, "w") as f:
@@ -33,70 +36,68 @@ def write(self, path, content):
print(f"Finish writing {path}")
- def single_file_module(self):
- """Process the module in the self.pfolder/*.py"""
-
- for module_name in glob.glob(pj(self.pfolder, "*.py")):
+ def _file_generate(self, package_parents):
+ file_list = []
+ for module_name in glob.glob(pj(self.pfolder, *package_parents, "*.py")):
module_name = os.path.basename(module_name)[:-3]
if module_name in self.ingnored_modules:
continue
- rst_file = pj(self.dfolder, f"{module_name}.rst")
+ rst_file = pj(self.dfolder, *package_parents, f"{module_name}.rst")
+ name = f"{self.name}"
+ for n in package_parents:
+ name += f".{n}"
+ name += f".{module_name}"
content = [
- f"{self.name}.{module_name}",
+ name,
self.title_line,
- self.automodule.format(f"{self.name}.{module_name}"),
+ self.automodule.format(name),
]
-
self.write(rst_file, content)
- self.tree[rst_file] = []
-
- def subdir_files_module(self):
- """Write the rst files for modules with subdir or files"""
- for subdir in glob.glob(pj(self.pfolder, "*/")):
+ if not package_parents:
+ upper = self.dfolder
+ else:
+ upper = package_parents[-1]
+ file_list.append(upper + f"/{module_name}.rst")
+ for subdir in glob.glob(pj(self.pfolder, *package_parents, "*/")):
if "_" in subdir:
continue
-
subdir = os.path.basename(os.path.normpath(subdir))
- os.makedirs(pj(self.dfolder, subdir), exist_ok=True)
- rst_file = pj(self.dfolder, f"{subdir}.rst")
- self.tree[rst_file] = []
-
- for module_name in glob.glob(pj(self.pfolder, subdir, f"*.py")):
- module_name = os.path.basename(module_name)[:-3]
- if module_name in self.ingnored_modules:
- continue
-
- content = [
- f"{self.name}.{subdir}.{module_name}",
- self.title_line,
- self.automodule.format(f"{self.name}.{subdir}.{module_name}"),
- ]
-
- self.write(pj(self.dfolder, subdir, f"{module_name}.rst"), content)
- self.tree[rst_file].append(f"{subdir}/{module_name}.rst")
-
+ os.makedirs(pj(self.dfolder, *package_parents, subdir), exist_ok=True)
+ rst_file = pj(self.dfolder, *package_parents, f"{subdir}.rst")
+ subdir_filelist = self._file_generate(package_parents + [subdir])
+
+ name = f"{self.name}"
+ for n in package_parents:
+ name += f".{n}"
+ name += f".{subdir}"
content = [
- f"{self.name}.{subdir}",
+ name,
self.title_line,
- self.toctree.format("\n ".join(sorted(self.tree[rst_file]))),
+ self.toctree.format("\n ".join(sorted(subdir_filelist))),
]
self.write(rst_file, content)
- def modules_file(self):
+ if not package_parents:
+ upper = self.dfolder
+ else:
+ upper = package_parents[-1]
+ file_list.append(upper + f"/{subdir}.rst")
+ return file_list
+
+ def modules_file(self, file_list):
"""Write the modules.rst"""
content = [
self.name,
self.title_line,
- self.toctree.format("\n ".join(sorted(self.tree.keys()))),
+ self.toctree.format("\n ".join(sorted(file_list))),
]
self.write("modules.rst", content)
def start(self):
self.cleanup()
- self.single_file_module()
- self.subdir_files_module()
- self.modules_file()
+ file_list = self._file_generate([])
+ self.modules_file(file_list)
if __name__ == "__main__":
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 6732f8fc..bd055a60 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -7,6 +7,9 @@ TensorCircuit Documentation
**Welcome and congratulations! You have found TensorCircuit.** 👏
+Introduction
+---------------
+
TensorCircuit is an open-source high-performance quantum computing software framework in Python.
* It is built for humans. 👽
@@ -25,34 +28,168 @@ With the help of TensorCircuit, now get ready to efficiently and elegantly solve
+
Relevant Links
--------------------
TensorCircuit is created and maintained by `Shi-Xin Zhang `_ and this version is released by `Tencent Quantum Lab `_.
The current core authors of TensorCircuit are `Shi-Xin Zhang `_ and `Yu-Qin Chen `_.
-We also thank `contributions `_ from the lab and the open source community.
+We also thank `contributions `_ from the open source community.
If you have any further questions or collaboration ideas, please use the issue tracker or forum below, or send email to shixinzhang#tencent.com.
-* Source code: https://github.com/tencent-quantum-lab/tensorcircuit
+.. card-carousel:: 2
+
+ .. card:: Source code
+ :link: https://github.com/tencent-quantum-lab/tensorcircuit
+ :shadow: md
+
+ GitHub
+
+
+ .. card:: Documentation
+ :link: https://tensorcircuit.readthedocs.io
+ :shadow: md
+
+ Readthedocs
+
+
+ .. card:: Whitepaper
+ :link: https://quantum-journal.org/papers/q-2023-02-02-912/
+ :shadow: md
+
+ *Quantum* journal
+
+
+ .. card:: Issue Tracker
+ :link: https://github.com/tencent-quantum-lab/tensorcircuit/issues
+ :shadow: md
+
+ GitHub Issues
+
+
+ .. card:: Forum
+ :link: https://github.com/tencent-quantum-lab/tensorcircuit/discussions
+ :shadow: md
+
+ GitHub Discussions
+
+
+ .. card:: PyPI
+ :link: https://pypi.org/project/tensorcircuit
+ :shadow: md
+
+ ``pip install``
+
+
+ .. card:: DockerHub
+ :link: https://hub.docker.com/repository/docker/tensorcircuit/tensorcircuit
+ :shadow: md
+
+ ``docker pull``
+
+
+ .. card:: Application
+ :link: https://github.com/tencent-quantum-lab/tensorcircuit#research-and-applications
+ :shadow: md
+
+ Research using TC
+
+
+ .. card:: Cloud
+ :link: https://quantum.tencent.com/cloud
+
+ Tencent Quantum Cloud
+
+
+
+
+..
+ * Source code: https://github.com/tencent-quantum-lab/tensorcircuit
+
+ * Documentation: https://tensorcircuit.readthedocs.io
+
+ * Software Whitepaper (published in Quantum): https://quantum-journal.org/papers/q-2023-02-02-912/
+
+ * Issue Tracker: https://github.com/tencent-quantum-lab/tensorcircuit/issues
+
+ * Forum: https://github.com/tencent-quantum-lab/tensorcircuit/discussions
+
+ * PyPI page: https://pypi.org/project/tensorcircuit
+
+ * DockerHub page: https://hub.docker.com/repository/docker/tensorcircuit/tensorcircuit
+
+ * Research and projects based on TensorCircuit: https://github.com/tencent-quantum-lab/tensorcircuit#research-and-applications
+
+ * Tencent Quantum Cloud Service: https://quantum.tencent.com/cloud/
+
+
+
+Unified Quantum Programming
+------------------------------
+
+TensorCircuit is unifying infrastructures and interfaces for quantum computing.
+
+.. grid:: 1 2 4 4
+ :margin: 0
+ :padding: 0
+ :gutter: 2
+
+ .. grid-item-card:: Unified Backends
+ :columns: 12 6 3 3
+ :shadow: md
+
+ Jax/TensorFlow/PyTorch/Numpy/Cupy
+
+ .. grid-item-card:: Unified Devices
+ :columns: 12 6 3 3
+ :shadow: md
+
+ CPU/GPU/TPU
+
+ .. grid-item-card:: Unified Providers
+ :columns: 12 6 3 3
+ :shadow: md
+
+ QPUs from different vendors
+
+ .. grid-item-card:: Unified Resources
+ :columns: 12 6 3 3
+ :shadow: md
+
+ local/cloud/HPC
+
+
+.. grid:: 1 2 4 4
+ :margin: 0
+ :padding: 0
+ :gutter: 2
-* Documentation: https://tensorcircuit.readthedocs.io
+ .. grid-item-card:: Unified Interfaces
+ :columns: 12 6 3 3
+ :shadow: md
-* Software Whitepaper (published in Quantum): https://quantum-journal.org/papers/q-2023-02-02-912/
+ numerical sim/hardware exp
-* Issue Tracker: https://github.com/tencent-quantum-lab/tensorcircuit/issues
+ .. grid-item-card:: Unified Engines
+ :columns: 12 6 3 3
+ :shadow: md
-* Forum: https://github.com/tencent-quantum-lab/tensorcircuit/discussions
+ ideal/noisy/approximate simulation
-* PyPI page: https://pypi.org/project/tensorcircuit
+ .. grid-item-card:: Unified Representations
+ :columns: 12 6 3 3
+ :shadow: md
-* DockerHub page: https://hub.docker.com/repository/docker/tensorcircuit/tensorcircuit
+ from/to_IR/qiskit/openqasm/json
-* Research and projects based on TensorCircuit: https://github.com/tencent-quantum-lab/tensorcircuit#research-and-applications
+ .. grid-item-card:: Unified Pipelines
+ :columns: 12 6 3 3
+ :shadow: md
-* Tencent Quantum Cloud Service: https://quantum.tencent.com/cloud/
+ stateless functional programming/stateful ML models
diff --git a/docs/source/locale/zh/LC_MESSAGES/api.po b/docs/source/locale/zh/LC_MESSAGES/api.po
index c3ed832b..b7958e1e 100644
--- a/docs/source/locale/zh/LC_MESSAGES/api.po
+++ b/docs/source/locale/zh/LC_MESSAGES/api.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: tensorcircuit\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-05-10 22:01+0800\n"
+"POT-Creation-Date: 2023-07-14 15:43+0800\n"
"PO-Revision-Date: 2022-04-13 14:58+0800\n"
"Last-Translator: Xinghan Yang\n"
"Language: cn\n"
@@ -16,7 +16,7 @@ msgstr ""
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.9.1\n"
+"Generated-By: Babel 2.12.1\n"
#: ../../source/api/about.rst:2
msgid "tensorcircuit.about"
@@ -65,45 +65,55 @@ msgstr ""
msgid "example"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss
-#: keras.engine.base_layer.Layer.add_metric
-#: keras.engine.base_layer.Layer.add_update
-#: keras.engine.base_layer.Layer.add_weight keras.engine.base_layer.Layer.apply
-#: keras.engine.base_layer.Layer.build
-#: keras.engine.base_layer.Layer.compute_mask
-#: keras.engine.base_layer.Layer.compute_output_shape
-#: keras.engine.base_layer.Layer.compute_output_signature
-#: keras.engine.base_layer.Layer.from_config
-#: keras.engine.base_layer.Layer.get_input_at
-#: keras.engine.base_layer.Layer.get_input_mask_at
-#: keras.engine.base_layer.Layer.get_input_shape_at
-#: keras.engine.base_layer.Layer.get_losses_for
-#: keras.engine.base_layer.Layer.get_output_at
-#: keras.engine.base_layer.Layer.get_output_mask_at
-#: keras.engine.base_layer.Layer.get_output_shape_at
-#: keras.engine.base_layer.Layer.get_updates_for
-#: keras.engine.base_layer.Layer.set_weights keras.engine.training.Model.build
-#: keras.engine.training.Model.compile keras.engine.training.Model.evaluate
-#: keras.engine.training.Model.fit keras.engine.training.Model.from_config
-#: keras.engine.training.Model.get_layer
-#: keras.engine.training.Model.load_weights
-#: keras.engine.training.Model.make_predict_function
-#: keras.engine.training.Model.make_test_function
-#: keras.engine.training.Model.make_train_function
-#: keras.engine.training.Model.predict
-#: keras.engine.training.Model.predict_on_batch
-#: keras.engine.training.Model.predict_step keras.engine.training.Model.save
-#: keras.engine.training.Model.save_weights keras.engine.training.Model.summary
-#: keras.engine.training.Model.test_on_batch
-#: keras.engine.training.Model.test_step keras.engine.training.Model.to_json
-#: keras.engine.training.Model.to_yaml
-#: keras.engine.training.Model.train_on_batch
-#: keras.engine.training.Model.train_step
-#: keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule.from_config
+#: keras.src.engine.base_layer.Layer.add_loss
+#: keras.src.engine.base_layer.Layer.add_metric
+#: keras.src.engine.base_layer.Layer.add_update
+#: keras.src.engine.base_layer.Layer.add_weight
+#: keras.src.engine.base_layer.Layer.build
+#: keras.src.engine.base_layer.Layer.build_from_config
+#: keras.src.engine.base_layer.Layer.compute_mask
+#: keras.src.engine.base_layer.Layer.compute_output_shape
+#: keras.src.engine.base_layer.Layer.compute_output_signature
+#: keras.src.engine.base_layer.Layer.from_config
+#: keras.src.engine.base_layer.Layer.get_input_at
+#: keras.src.engine.base_layer.Layer.get_input_mask_at
+#: keras.src.engine.base_layer.Layer.get_input_shape_at
+#: keras.src.engine.base_layer.Layer.get_output_at
+#: keras.src.engine.base_layer.Layer.get_output_mask_at
+#: keras.src.engine.base_layer.Layer.get_output_shape_at
+#: keras.src.engine.base_layer.Layer.load_own_variables
+#: keras.src.engine.base_layer.Layer.save_own_variables
+#: keras.src.engine.base_layer.Layer.set_weights
+#: keras.src.engine.training.Model.build
+#: keras.src.engine.training.Model.compile
+#: keras.src.engine.training.Model.compile_from_config
+#: keras.src.engine.training.Model.compute_loss
+#: keras.src.engine.training.Model.compute_metrics
+#: keras.src.engine.training.Model.evaluate
+#: keras.src.engine.training.Model.export keras.src.engine.training.Model.fit
+#: keras.src.engine.training.Model.from_config
+#: keras.src.engine.training.Model.get_layer
+#: keras.src.engine.training.Model.load_weights
+#: keras.src.engine.training.Model.make_predict_function
+#: keras.src.engine.training.Model.make_test_function
+#: keras.src.engine.training.Model.make_train_function
+#: keras.src.engine.training.Model.predict
+#: keras.src.engine.training.Model.predict_on_batch
+#: keras.src.engine.training.Model.predict_step
+#: keras.src.engine.training.Model.save
+#: keras.src.engine.training.Model.save_weights
+#: keras.src.engine.training.Model.summary
+#: keras.src.engine.training.Model.test_on_batch
+#: keras.src.engine.training.Model.test_step
+#: keras.src.engine.training.Model.to_json
+#: keras.src.engine.training.Model.to_yaml
+#: keras.src.engine.training.Model.train_on_batch
+#: keras.src.engine.training.Model.train_step
+#: keras.src.optimizers.schedules.learning_rate_schedule.LearningRateSchedule.from_config
#: of tensorcircuit.abstractcircuit.AbstractCircuit.append
#: tensorcircuit.abstractcircuit.AbstractCircuit.append_from_qir
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list
#: tensorcircuit.abstractcircuit.AbstractCircuit.barrier_instruction
#: tensorcircuit.abstractcircuit.AbstractCircuit.cond_measurement
#: tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps
@@ -601,6 +611,7 @@ msgstr ""
#: tensorcircuit.densitymatrix.DMCircuit.expectation
#: tensorcircuit.densitymatrix.DMCircuit.to_circuit
#: tensorcircuit.densitymatrix.DMCircuit2.apply_general_kraus_delayed..apply
+#: tensorcircuit.experimental.evol_global tensorcircuit.experimental.evol_local
#: tensorcircuit.experimental.hamiltonian_evol
#: tensorcircuit.experimental.parameter_shift_grad
#: tensorcircuit.experimental.parameter_shift_grad_v2
@@ -693,6 +704,12 @@ msgstr ""
#: tensorcircuit.quantum.truncated_free_energy tensorcircuit.quantum.xyz2ps
#: tensorcircuit.results.counts.expectation
#: tensorcircuit.results.counts.plot_histogram
+#: tensorcircuit.results.qem.qem_methods.add_dd
+#: tensorcircuit.results.qem.qem_methods.apply_dd
+#: tensorcircuit.results.qem.qem_methods.apply_rc
+#: tensorcircuit.results.qem.qem_methods.apply_zne
+#: tensorcircuit.results.qem.qem_methods.prune_ddcircuit
+#: tensorcircuit.results.qem.qem_methods.used_qubits
#: tensorcircuit.results.readout_mitigation.ReadoutMit.__init__
#: tensorcircuit.results.readout_mitigation.ReadoutMit.apply_correction
#: tensorcircuit.results.readout_mitigation.ReadoutMit.apply_readout_mitigation
@@ -710,7 +727,6 @@ msgstr ""
#: tensorcircuit.templates.blocks.example_block
#: tensorcircuit.templates.blocks.qft
#: tensorcircuit.templates.blocks.state_centric
-#: tensorcircuit.templates.chems.get_ps
#: tensorcircuit.templates.graphs.Grid2DCoord.__init__
#: tensorcircuit.templates.graphs.Grid2DCoord.all_cols
#: tensorcircuit.templates.graphs.Grid2DCoord.all_rows
@@ -816,7 +832,6 @@ msgstr ""
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.shape_tuple
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.sign
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.slice
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.trace
#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator
#: tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality
@@ -871,39 +886,44 @@ msgid ""
"means plain concatenation."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight keras.engine.base_layer.Layer.apply
-#: keras.engine.base_layer.Layer.compute_mask
-#: keras.engine.base_layer.Layer.compute_output_shape
-#: keras.engine.base_layer.Layer.compute_output_signature
-#: keras.engine.base_layer.Layer.count_params
-#: keras.engine.base_layer.Layer.from_config
-#: keras.engine.base_layer.Layer.get_config
-#: keras.engine.base_layer.Layer.get_input_at
-#: keras.engine.base_layer.Layer.get_input_mask_at
-#: keras.engine.base_layer.Layer.get_input_shape_at
-#: keras.engine.base_layer.Layer.get_losses_for
-#: keras.engine.base_layer.Layer.get_output_at
-#: keras.engine.base_layer.Layer.get_output_mask_at
-#: keras.engine.base_layer.Layer.get_output_shape_at
-#: keras.engine.base_layer.Layer.get_updates_for
-#: keras.engine.base_layer.Layer.get_weights
-#: keras.engine.training.Model.evaluate keras.engine.training.Model.fit
-#: keras.engine.training.Model.from_config
-#: keras.engine.training.Model.get_config keras.engine.training.Model.get_layer
-#: keras.engine.training.Model.get_weights
-#: keras.engine.training.Model.load_weights
-#: keras.engine.training.Model.make_predict_function
-#: keras.engine.training.Model.make_test_function
-#: keras.engine.training.Model.make_train_function
-#: keras.engine.training.Model.predict
-#: keras.engine.training.Model.predict_on_batch
-#: keras.engine.training.Model.predict_step
-#: keras.engine.training.Model.test_on_batch
-#: keras.engine.training.Model.test_step keras.engine.training.Model.to_json
-#: keras.engine.training.Model.to_yaml
-#: keras.engine.training.Model.train_on_batch
-#: keras.engine.training.Model.train_step
-#: keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule.from_config
+#: keras.src.engine.base_layer.Layer.add_weight
+#: keras.src.engine.base_layer.Layer.compute_mask
+#: keras.src.engine.base_layer.Layer.compute_output_shape
+#: keras.src.engine.base_layer.Layer.compute_output_signature
+#: keras.src.engine.base_layer.Layer.count_params
+#: keras.src.engine.base_layer.Layer.from_config
+#: keras.src.engine.base_layer.Layer.get_build_config
+#: keras.src.engine.base_layer.Layer.get_config
+#: keras.src.engine.base_layer.Layer.get_input_at
+#: keras.src.engine.base_layer.Layer.get_input_mask_at
+#: keras.src.engine.base_layer.Layer.get_input_shape_at
+#: keras.src.engine.base_layer.Layer.get_output_at
+#: keras.src.engine.base_layer.Layer.get_output_mask_at
+#: keras.src.engine.base_layer.Layer.get_output_shape_at
+#: keras.src.engine.base_layer.Layer.get_weights
+#: keras.src.engine.training.Model.compute_loss
+#: keras.src.engine.training.Model.compute_metrics
+#: keras.src.engine.training.Model.evaluate keras.src.engine.training.Model.fit
+#: keras.src.engine.training.Model.from_config
+#: keras.src.engine.training.Model.get_compile_config
+#: keras.src.engine.training.Model.get_config
+#: keras.src.engine.training.Model.get_layer
+#: keras.src.engine.training.Model.get_metrics_result
+#: keras.src.engine.training.Model.get_weight_paths
+#: keras.src.engine.training.Model.get_weights
+#: keras.src.engine.training.Model.make_predict_function
+#: keras.src.engine.training.Model.make_test_function
+#: keras.src.engine.training.Model.make_train_function
+#: keras.src.engine.training.Model.predict
+#: keras.src.engine.training.Model.predict_on_batch
+#: keras.src.engine.training.Model.predict_step
+#: keras.src.engine.training.Model.test_on_batch
+#: keras.src.engine.training.Model.test_step
+#: keras.src.engine.training.Model.to_json
+#: keras.src.engine.training.Model.to_yaml
+#: keras.src.engine.training.Model.train_on_batch
+#: keras.src.engine.training.Model.train_step
+#: keras.src.optimizers.schedules.learning_rate_schedule.LearningRateSchedule.from_config
#: of tensorcircuit.abstractcircuit.AbstractCircuit.append
#: tensorcircuit.abstractcircuit.AbstractCircuit.cond_measurement
#: tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps
@@ -1540,6 +1560,7 @@ msgstr ""
#: tensorcircuit.densitymatrix.DMCircuit.get_dm_as_quoperator
#: tensorcircuit.densitymatrix.DMCircuit.to_circuit
#: tensorcircuit.densitymatrix.DMCircuit.wavefunction
+#: tensorcircuit.experimental.evol_global tensorcircuit.experimental.evol_local
#: tensorcircuit.experimental.hamiltonian_evol
#: tensorcircuit.experimental.parameter_shift_grad
#: tensorcircuit.experimental.parameter_shift_grad_v2
@@ -1664,6 +1685,12 @@ msgstr ""
#: tensorcircuit.quantum.truncated_free_energy tensorcircuit.quantum.xyz2ps
#: tensorcircuit.results.counts.expectation
#: tensorcircuit.results.counts.plot_histogram
+#: tensorcircuit.results.qem.qem_methods.add_dd
+#: tensorcircuit.results.qem.qem_methods.apply_dd
+#: tensorcircuit.results.qem.qem_methods.apply_rc
+#: tensorcircuit.results.qem.qem_methods.apply_zne
+#: tensorcircuit.results.qem.qem_methods.prune_ddcircuit
+#: tensorcircuit.results.qem.qem_methods.used_qubits
#: tensorcircuit.results.readout_mitigation.ReadoutMit.apply_readout_mitigation
#: tensorcircuit.results.readout_mitigation.ReadoutMit.expectation
#: tensorcircuit.results.readout_mitigation.ReadoutMit.get_matrix
@@ -1679,7 +1706,6 @@ msgstr ""
#: tensorcircuit.templates.blocks.example_block
#: tensorcircuit.templates.blocks.qft
#: tensorcircuit.templates.blocks.state_centric
-#: tensorcircuit.templates.chems.get_ps
#: tensorcircuit.templates.graphs.Grid2DCoord.all_cols
#: tensorcircuit.templates.graphs.Grid2DCoord.all_rows
#: tensorcircuit.templates.graphs.Grid2DCoord.lattice_graph
@@ -1824,7 +1850,6 @@ msgstr ""
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.shape_tuple
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.subtraction
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.sum
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.trace
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.transpose
#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator
@@ -2324,6 +2349,7 @@ msgstr ""
#: tensorcircuit.densitymatrix.DMCircuit.get_dm_as_quoperator
#: tensorcircuit.densitymatrix.DMCircuit.to_circuit
#: tensorcircuit.densitymatrix.DMCircuit.wavefunction
+#: tensorcircuit.experimental.evol_global tensorcircuit.experimental.evol_local
#: tensorcircuit.experimental.hamiltonian_evol
#: tensorcircuit.experimental.parameter_shift_grad
#: tensorcircuit.experimental.parameter_shift_grad_v2
@@ -2414,6 +2440,12 @@ msgstr ""
#: tensorcircuit.quantum.truncated_free_energy tensorcircuit.quantum.xyz2ps
#: tensorcircuit.results.counts.expectation
#: tensorcircuit.results.counts.plot_histogram
+#: tensorcircuit.results.qem.qem_methods.add_dd
+#: tensorcircuit.results.qem.qem_methods.apply_dd
+#: tensorcircuit.results.qem.qem_methods.apply_rc
+#: tensorcircuit.results.qem.qem_methods.apply_zne
+#: tensorcircuit.results.qem.qem_methods.prune_ddcircuit
+#: tensorcircuit.results.qem.qem_methods.used_qubits
#: tensorcircuit.results.readout_mitigation.ReadoutMit.apply_readout_mitigation
#: tensorcircuit.results.readout_mitigation.ReadoutMit.expectation
#: tensorcircuit.results.readout_mitigation.ReadoutMit.get_matrix
@@ -2429,7 +2461,6 @@ msgstr ""
#: tensorcircuit.templates.blocks.example_block
#: tensorcircuit.templates.blocks.qft
#: tensorcircuit.templates.blocks.state_centric
-#: tensorcircuit.templates.chems.get_ps
#: tensorcircuit.templates.graphs.Grid2DCoord.all_cols
#: tensorcircuit.templates.graphs.Grid2DCoord.all_rows
#: tensorcircuit.templates.graphs.Grid2DCoord.lattice_graph
@@ -2508,7 +2539,6 @@ msgstr ""
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.inv
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.random_uniform
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.sum
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.trace
#: tensornetwork.matrixproductstates.base_mps.BaseMPS.apply_transfer_operator
#: tensornetwork.matrixproductstates.base_mps.BaseMPS.check_orthonormality
@@ -2632,6 +2662,8 @@ msgid "add a barrier instruction flag, no effect on numerical simulation"
msgstr ""
#: of tensorcircuit.abstractcircuit.AbstractCircuit.barrier_instruction:3
+#: tensorcircuit.abstractcircuit.AbstractCircuit.measure_instruction:3
+#: tensorcircuit.abstractcircuit.AbstractCircuit.reset_instruction:3
msgid "the corresponding qubits"
msgstr ""
@@ -2682,6 +2714,7 @@ msgid ""
"Visualise the circuit. This method recevies the keywords as same as "
"qiskit.circuit.QuantumCircuit.draw. More details can be found here: "
"https://qiskit.org/documentation/stubs/qiskit.circuit.QuantumCircuit.draw.html."
+" Interesting kws options include: ``idle_wires``(bool)"
msgstr ""
#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:1
@@ -2706,10 +2739,17 @@ msgid "sites to apply Z gate, defaults to None"
msgstr ""
#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:32
+msgid ""
+"or one can apply a ps structures instead of ``x``, ``y``, ``z``, e.g. [0,"
+" 1, 3, 0, 2, 2] for X_1Z_2Y_4Y_5 defaults to None, ``ps`` can overwrite "
+"``x``, ``y`` and ``z``"
+msgstr ""
+
+#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:36
msgid "whether to cache and reuse the wavefunction, defaults to True"
msgstr ""
-#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:34
+#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:38
#: tensorcircuit.basecircuit.BaseCircuit.sample_expectation_ps:46
#: tensorcircuit.circuit.Circuit.expectation:32
#: tensorcircuit.densitymatrix.DMCircuit.expectation:8
@@ -2718,7 +2758,7 @@ msgstr ""
msgid "Noise Configuration, defaults to None"
msgstr ""
-#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:36
+#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:40
#: tensorcircuit.basecircuit.BaseCircuit.sample_expectation_ps:48
#: tensorcircuit.circuit.Circuit.expectation:34
#: tensorcircuit.noisemodel.expectation_noisfy:7
@@ -2727,7 +2767,7 @@ msgid ""
" to 1000"
msgstr ""
-#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:38
+#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:42
#: tensorcircuit.basecircuit.BaseCircuit.sample_expectation_ps:50
#: tensorcircuit.circuit.Circuit.expectation:36
#: tensorcircuit.densitymatrix.DMCircuit.expectation:10
@@ -2738,7 +2778,7 @@ msgid ""
"None, used for noisfy circuit sampling"
msgstr ""
-#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:41
+#: of tensorcircuit.abstractcircuit.AbstractCircuit.expectation_ps:45
msgid "Expectation value"
msgstr ""
@@ -2815,6 +2855,12 @@ msgstr ""
#: tensorcircuit.cloud.apis.set_token:13
#: tensorcircuit.cons.runtime_contractor:3
#: tensorcircuit.cons.set_function_contractor:3
+#: tensorcircuit.experimental.evol_global:4
+#: tensorcircuit.experimental.evol_global:9
+#: tensorcircuit.experimental.evol_global:11
+#: tensorcircuit.experimental.evol_local:4
+#: tensorcircuit.experimental.evol_local:6
+#: tensorcircuit.experimental.evol_local:13
#: tensorcircuit.experimental.hamiltonian_evol:4
#: tensorcircuit.experimental.hamiltonian_evol:6
#: tensorcircuit.experimental.hamiltonian_evol:8 tensorcircuit.gates.u_gate:16
@@ -2824,8 +2870,8 @@ msgstr ""
#: tensorcircuit.quantum.ps2xyz:7 tensorcircuit.quantum.sample2count:3
#: tensorcircuit.quantum.sample2count:5 tensorcircuit.quantum.sample2count:9
#: tensorcircuit.quantum.xyz2ps:3 tensorcircuit.quantum.xyz2ps:7
-#: tensorcircuit.results.counts.plot_histogram:4
#: tensorcircuit.results.counts.plot_histogram:6
+#: tensorcircuit.results.counts.plot_histogram:8
#: tensorcircuit.templates.graphs.Grid2DCoord.lattice_graph:6
#: tensorcircuit.translation.eqasm2tc:3 tensorcircuit.translation.eqasm2tc:9
#: tensorcircuit.translation.qir2json:3 tensorcircuit.translation.qir2json:8
@@ -3006,11 +3052,6 @@ msgstr ""
msgid "add a measurement instruction flag, no effect on numerical simulation"
msgstr ""
-#: of tensorcircuit.abstractcircuit.AbstractCircuit.measure_instruction:3
-#: tensorcircuit.abstractcircuit.AbstractCircuit.reset_instruction:3
-msgid "the corresponding qubit"
-msgstr ""
-
#: of tensorcircuit.abstractcircuit.AbstractCircuit.prepend:1
msgid "prepend circuit ``c`` before"
msgstr ""
@@ -3766,7 +3807,7 @@ msgstr ""
#: of tensorcircuit.applications.van.MADE:1
#: tensorcircuit.applications.van.NMF:1
#: tensorcircuit.applications.van.PixelCNN:1
-msgid "Bases: :py:class:`~keras.engine.training.Model`"
+msgid "Bases: :py:class:`~keras.src.engine.training.Model`"
msgstr ""
#: of tensorcircuit.applications.van.MADE.activity_regularizer:1
@@ -3781,11 +3822,11 @@ msgstr ""
msgid "Optional regularizer function for the output of this layer."
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:1 of
+#: keras.src.engine.base_layer.Layer.add_loss:1 of
msgid "Add loss tensor(s), potentially dependent on layer inputs."
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:3 of
+#: keras.src.engine.base_layer.Layer.add_loss:3 of
msgid ""
"Some losses (for instance, activity regularization losses) may be "
"dependent on the inputs passed when calling a layer. Hence, when reusing "
@@ -3794,16 +3835,19 @@ msgid ""
"automatically keeps track of dependencies."
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:9 of
+#: keras.src.engine.base_layer.Layer.add_loss:9 of
msgid ""
"This method can be used inside a subclassed layer or model's `call` "
"function, in which case `losses` should be a Tensor or list of Tensors."
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:12
-#: keras.engine.base_layer.Layer.add_loss:26
-#: keras.engine.base_layer.Layer.add_loss:42
-#: keras.engine.training.Model.compile:3 keras.engine.training.Model.save:28 of
+#: keras.src.engine.base_layer.Layer.add_loss:12
+#: keras.src.engine.base_layer.Layer.add_loss:32
+#: keras.src.engine.base_layer.Layer.add_loss:48
+#: keras.src.engine.training.Model.compile:3
+#: keras.src.engine.training.Model.export:15
+#: keras.src.engine.training.Model.get_weight_paths:18
+#: keras.src.engine.training.Model.save:38 of
#: tensorcircuit.applications.van.MaskedConv2D.metrics:3
#: tensorcircuit.applications.van.MaskedLinear.metrics:3
#: tensorcircuit.applications.van.ResidualBlock.metrics:3
@@ -3813,22 +3857,24 @@ msgstr ""
msgid "Example:"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:14 of
+#: keras.src.engine.base_layer.Layer.add_loss:14 of
msgid "```python class MyLayer(tf.keras.layers.Layer):"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:17
-#: keras.engine.base_layer.Layer.add_metric:14 of
+#: keras.src.engine.base_layer.Layer.add_loss:17
+#: keras.src.engine.base_layer.Layer.add_metric:14
+#: keras.src.engine.training.Model.get_weight_paths:30 of
msgid "def call(self, inputs):"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:17 of
+#: keras.src.engine.base_layer.Layer.add_loss:17 of
msgid "self.add_loss(tf.abs(tf.reduce_mean(inputs))) return inputs"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:19
-#: keras.engine.base_layer.Layer.add_metric:16
-#: keras.engine.training.Model.compile:10 of
+#: keras.src.engine.base_layer.Layer.add_loss:19
+#: keras.src.engine.base_layer.Layer.add_metric:16
+#: keras.src.engine.training.Model.compile:10
+#: keras.src.engine.training.Model.compute_metrics:21 of
#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:21
#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigs:35
#: tensornetwork.backends.jax.jax_backend.JaxBackend.eigsh:21
@@ -3838,16 +3884,24 @@ msgstr ""
msgid "```"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:21 of
+#: keras.src.engine.base_layer.Layer.add_loss:21 of
msgid ""
-"This method can also be called directly on a Functional Model during "
-"construction. In this case, any loss Tensors passed to this Model must be"
-" symbolic and be able to be traced back to the model's `Input`s. These "
-"losses become part of the model's topology and are tracked in "
+"The same code works in distributed training: the input to `add_loss()` is"
+" treated like a regularization loss and averaged across replicas by the "
+"training loop (both built-in `Model.fit()` and compliant custom training "
+"loops)."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.add_loss:26 of
+msgid ""
+"The `add_loss` method can also be called directly on a Functional Model "
+"during construction. In this case, any loss Tensors passed to this Model "
+"must be symbolic and be able to be traced back to the model's `Input`s. "
+"These losses become part of the model's topology and are tracked in "
"`get_config`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:28 of
+#: keras.src.engine.base_layer.Layer.add_loss:34 of
msgid ""
"```python inputs = tf.keras.Input(shape=(10,)) x = "
"tf.keras.layers.Dense(10)(inputs) outputs = tf.keras.layers.Dense(1)(x) "
@@ -3855,7 +3909,7 @@ msgid ""
"model.add_loss(tf.abs(tf.reduce_mean(x))) ```"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:37 of
+#: keras.src.engine.base_layer.Layer.add_loss:43 of
msgid ""
"If this is not the case for your loss (if, for example, your loss "
"references a `Variable` of one of the model's layers), you can wrap your "
@@ -3863,7 +3917,7 @@ msgid ""
"the model's topology since they can't be serialized."
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:44 of
+#: keras.src.engine.base_layer.Layer.add_loss:50 of
msgid ""
"```python inputs = tf.keras.Input(shape=(10,)) d = "
"tf.keras.layers.Dense(10) x = d(inputs) outputs = "
@@ -3872,57 +3926,47 @@ msgid ""
"```"
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:54 of
+#: keras.src.engine.base_layer.Layer.add_loss:60 of
msgid ""
"Loss tensor, or list/tuple of tensors. Rather than tensors, losses may "
"also be zero-argument callables which create a loss tensor."
msgstr ""
-#: keras.engine.base_layer.Layer.add_loss:56 of
-msgid ""
-"Additional keyword arguments for backward compatibility. Accepted values:"
-" inputs - Deprecated, will be automatically inferred."
-msgstr ""
-
-#: keras.engine.base_layer.Layer.add_loss:56 of
-msgid "Additional keyword arguments for backward compatibility. Accepted values:"
-msgstr ""
-
-#: keras.engine.base_layer.Layer.add_loss:58 of
-msgid "inputs - Deprecated, will be automatically inferred."
+#: keras.src.engine.base_layer.Layer.add_loss:63 of
+msgid "Used for backwards compatibility only."
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:1 of
+#: keras.src.engine.base_layer.Layer.add_metric:1 of
msgid "Adds metric tensor to the layer."
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:3 of
+#: keras.src.engine.base_layer.Layer.add_metric:3 of
msgid ""
"This method can be used inside the `call()` method of a subclassed layer "
"or model."
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:6 of
+#: keras.src.engine.base_layer.Layer.add_metric:6 of
msgid "```python class MyMetricLayer(tf.keras.layers.Layer):"
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:10 of
+#: keras.src.engine.base_layer.Layer.add_metric:10 of
msgid "def __init__(self):"
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:9 of
+#: keras.src.engine.base_layer.Layer.add_metric:9 of
msgid ""
"super(MyMetricLayer, self).__init__(name='my_metric_layer') self.mean = "
"tf.keras.metrics.Mean(name='metric_1')"
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:13 of
+#: keras.src.engine.base_layer.Layer.add_metric:13 of
msgid ""
"self.add_metric(self.mean(inputs)) self.add_metric(tf.reduce_sum(inputs),"
" name='metric_2') return inputs"
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:18 of
+#: keras.src.engine.base_layer.Layer.add_metric:18 of
msgid ""
"This method can also be called directly on a Functional Model during "
"construction. In this case, any tensor passed to this Model must be "
@@ -3931,7 +3975,7 @@ msgid ""
" the model via `save()`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:24 of
+#: keras.src.engine.base_layer.Layer.add_metric:24 of
msgid ""
"```python inputs = tf.keras.Input(shape=(10,)) x = "
"tf.keras.layers.Dense(10)(inputs) outputs = tf.keras.layers.Dense(1)(x) "
@@ -3939,7 +3983,7 @@ msgid ""
"model.add_metric(math_ops.reduce_sum(x), name='metric_1') ```"
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:32 of
+#: keras.src.engine.base_layer.Layer.add_metric:32 of
msgid ""
"Note: Calling `add_metric()` with the result of a metric object on a "
"Functional Model, as shown in the example below, is not supported. This "
@@ -3947,7 +3991,7 @@ msgid ""
"inputs."
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:36 of
+#: keras.src.engine.base_layer.Layer.add_metric:37 of
msgid ""
"```python inputs = tf.keras.Input(shape=(10,)) x = "
"tf.keras.layers.Dense(10)(inputs) outputs = tf.keras.layers.Dense(1)(x) "
@@ -3955,15 +3999,15 @@ msgid ""
"model.add_metric(tf.keras.metrics.Mean()(x), name='metric_1') ```"
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:44 of
+#: keras.src.engine.base_layer.Layer.add_metric:45 of
msgid "Metric tensor."
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:45 of
+#: keras.src.engine.base_layer.Layer.add_metric:46 of
msgid "String metric name."
msgstr ""
-#: keras.engine.base_layer.Layer.add_metric:46 of
+#: keras.src.engine.base_layer.Layer.add_metric:47 of
msgid ""
"Additional keyword arguments for backward compatibility. Accepted values:"
" `aggregation` - When the `value` tensor provided is not the result of "
@@ -3971,11 +4015,11 @@ msgid ""
" a `keras.Metric.Mean`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_update:1 of
+#: keras.src.engine.base_layer.Layer.add_update:1 of
msgid "Add update op(s), potentially dependent on layer inputs."
msgstr ""
-#: keras.engine.base_layer.Layer.add_update:3 of
+#: keras.src.engine.base_layer.Layer.add_update:3 of
msgid ""
"Weight updates (for instance, the updates of the moving mean and variance"
" in a BatchNormalization layer) may be dependent on the inputs passed "
@@ -3985,14 +4029,14 @@ msgid ""
"dependencies."
msgstr ""
-#: keras.engine.base_layer.Layer.add_update:10 of
+#: keras.src.engine.base_layer.Layer.add_update:10 of
msgid ""
"This call is ignored when eager execution is enabled (in that case, "
"variable updates are run on the fly and thus do not need to be tracked "
"for later execution)."
msgstr ""
-#: keras.engine.base_layer.Layer.add_update:14 of
+#: keras.src.engine.base_layer.Layer.add_update:14 of
msgid ""
"Update op, or list/tuple of update ops, or zero-arg callable that returns"
" an update op. A zero-arg callable should be passed in order to disable "
@@ -4000,39 +4044,35 @@ msgid ""
"executing in Eager mode."
msgstr ""
-#: keras.engine.base_layer.Layer.add_update:18 of
-msgid "Deprecated, will be automatically inferred."
-msgstr ""
-
-#: keras.engine.base_layer.Layer.add_variable:1 of
+#: keras.src.engine.base_layer.Layer.add_variable:1 of
msgid "Deprecated, do NOT use! Alias for `add_weight`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:1 of
+#: keras.src.engine.base_layer.Layer.add_weight:1 of
msgid "Adds a new variable to the layer."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:3 of
+#: keras.src.engine.base_layer.Layer.add_weight:3 of
msgid "Variable name."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:4 of
+#: keras.src.engine.base_layer.Layer.add_weight:4 of
msgid "Variable shape. Defaults to scalar if unspecified."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:5 of
+#: keras.src.engine.base_layer.Layer.add_weight:5 of
msgid "The type of the variable. Defaults to `self.dtype`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:6 of
+#: keras.src.engine.base_layer.Layer.add_weight:6 of
msgid "Initializer instance (callable)."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:7 of
+#: keras.src.engine.base_layer.Layer.add_weight:7 of
msgid "Regularizer instance (callable)."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:8 of
+#: keras.src.engine.base_layer.Layer.add_weight:8 of
msgid ""
"Boolean, whether the variable should be part of the layer's "
"\"trainable_variables\" (e.g. variables, biases) or "
@@ -4040,15 +4080,28 @@ msgid ""
" `trainable` cannot be `True` if `synchronization` is set to `ON_READ`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:13 of
+#: keras.src.engine.base_layer.Layer.add_weight:13 of
msgid "Constraint instance (callable)."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:14 of
-msgid "Whether to use `ResourceVariable`."
+#: keras.src.engine.base_layer.Layer.add_weight:14 of
+msgid ""
+"Whether to use a `ResourceVariable` or not. See [this guide]( "
+"https://www.tensorflow.org/guide/migrate/tf1_vs_tf2#resourcevariables_instead_of_referencevariables)"
+" for more information."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.add_weight:14 of
+msgid ""
+"Whether to use a `ResourceVariable` or not. See [this guide]( "
+"https://www.tensorflow.org/guide/migrate/tf1_vs_tf2#resourcevariables_instead_of_referencevariables)"
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.add_weight:17 of
+msgid "for more information."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:15 of
+#: keras.src.engine.base_layer.Layer.add_weight:18 of
msgid ""
"Indicates when a distributed a variable will be aggregated. Accepted "
"values are constants defined in the class `tf.VariableSynchronization`. "
@@ -4057,37 +4110,39 @@ msgid ""
"is set to `ON_READ`, `trainable` must not be set to `True`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:21 of
+#: keras.src.engine.base_layer.Layer.add_weight:24 of
msgid ""
"Indicates how a distributed variable will be aggregated. Accepted values "
"are constants defined in the class `tf.VariableAggregation`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:24 of
+#: keras.src.engine.base_layer.Layer.add_weight:27 of
msgid ""
"Additional keyword arguments. Accepted values are `getter`, "
"`collections`, `experimental_autocast` and `caching_device`."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:27 of
+#: keras.src.engine.base_layer.Layer.add_weight:30 of
msgid "The variable created."
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight
-#: keras.engine.base_layer.Layer.compute_output_signature
-#: keras.engine.base_layer.Layer.count_params
-#: keras.engine.base_layer.Layer.get_input_at
-#: keras.engine.base_layer.Layer.get_input_shape_at
-#: keras.engine.base_layer.Layer.get_output_at
-#: keras.engine.base_layer.Layer.get_output_shape_at
-#: keras.engine.base_layer.Layer.set_weights keras.engine.training.Model.build
-#: keras.engine.training.Model.evaluate keras.engine.training.Model.fit
-#: keras.engine.training.Model.load_weights keras.engine.training.Model.predict
-#: keras.engine.training.Model.predict_on_batch
-#: keras.engine.training.Model.save_weights keras.engine.training.Model.summary
-#: keras.engine.training.Model.test_on_batch
-#: keras.engine.training.Model.to_yaml
-#: keras.engine.training.Model.train_on_batch of
+#: keras.src.engine.base_layer.Layer.add_weight
+#: keras.src.engine.base_layer.Layer.compute_output_signature
+#: keras.src.engine.base_layer.Layer.count_params
+#: keras.src.engine.base_layer.Layer.get_input_at
+#: keras.src.engine.base_layer.Layer.get_input_shape_at
+#: keras.src.engine.base_layer.Layer.get_output_at
+#: keras.src.engine.base_layer.Layer.get_output_shape_at
+#: keras.src.engine.base_layer.Layer.set_weights
+#: keras.src.engine.training.Model.build
+#: keras.src.engine.training.Model.evaluate keras.src.engine.training.Model.fit
+#: keras.src.engine.training.Model.predict
+#: keras.src.engine.training.Model.predict_on_batch
+#: keras.src.engine.training.Model.save_weights
+#: keras.src.engine.training.Model.summary
+#: keras.src.engine.training.Model.test_on_batch
+#: keras.src.engine.training.Model.to_yaml
+#: keras.src.engine.training.Model.train_on_batch of
#: tensorcircuit.applications.van.MADE.input
#: tensorcircuit.applications.van.MADE.input_mask
#: tensorcircuit.applications.van.MADE.input_shape
@@ -4170,52 +4225,23 @@ msgstr ""
msgid "Raises"
msgstr ""
-#: keras.engine.base_layer.Layer.add_weight:29 of
+#: keras.src.engine.base_layer.Layer.add_weight:32 of
msgid ""
"When giving unsupported dtype and no initializer or when trainable "
-"has been set to True with synchronization set as `ON_READ`."
-msgstr ""
-
-#: keras.engine.base_layer.Layer.apply:1
-#: keras.engine.base_layer.Layer.get_losses_for:1
-#: keras.engine.base_layer.Layer.get_updates_for:1 of
-#: tensorcircuit.applications.van.MADE.state_updates:1
-#: tensorcircuit.applications.van.NMF.state_updates:1
-#: tensorcircuit.applications.van.PixelCNN.state_updates:1
-msgid "Deprecated, do NOT use!"
-msgstr ""
-
-#: keras.engine.base_layer.Layer.apply:3 of
-msgid "This is an alias of `self.__call__`."
-msgstr ""
-
-#: keras.engine.base_layer.Layer.apply:5 of
-msgid "Input tensor(s)."
-msgstr ""
-
-#: keras.engine.base_layer.Layer.apply:6 of
-msgid "additional positional arguments to be passed to `self.call`."
+"has been set to True with synchronization set as `ON_READ`."
msgstr ""
-#: keras.engine.base_layer.Layer.apply:7 of
-msgid "additional keyword arguments to be passed to `self.call`."
-msgstr ""
-
-#: keras.engine.base_layer.Layer.apply:9 of
-msgid "Output tensor(s)."
-msgstr ""
-
-#: keras.engine.training.Model.build:1 of
+#: keras.src.engine.training.Model.build:1 of
msgid "Builds the model based on input shapes received."
msgstr ""
-#: keras.engine.training.Model.build:3 of
+#: keras.src.engine.training.Model.build:3 of
msgid ""
"This is to be used for subclassed models, which do not know at "
"instantiation time what their inputs look like."
msgstr ""
-#: keras.engine.training.Model.build:6 of
+#: keras.src.engine.training.Model.build:6 of
msgid ""
"This method only exists for users who want to call `model.build()` in a "
"standalone way (as a substitute for calling the model on real data to "
@@ -4223,28 +4249,44 @@ msgid ""
"never throw unexpected errors in an unrelated workflow)."
msgstr ""
-#: keras.engine.training.Model.build:11 of
+#: keras.src.engine.training.Model.build:11 of
msgid ""
"Single tuple, `TensorShape` instance, or list/dict of shapes, where "
"shapes are tuples, integers, or `TensorShape` instances."
msgstr ""
-#: keras.engine.training.Model.build:14 of
+#: keras.src.engine.training.Model.build:15 of
msgid ""
"1. In case of invalid user-provided data (not of type tuple, list,"
" `TensorShape`, or dict). 2. If the model requires call arguments "
"that are agnostic to the input shapes (positional or keyword arg "
-"in call signature). 3. If not all layers were properly built. 4. "
-"If float type inputs are not supported within the layers."
+"in call signature). 3. If not all layers were properly built."
+" 4. If float type inputs are not supported within the layers."
msgstr ""
-#: keras.engine.training.Model.build:14 of
+#: keras.src.engine.training.Model.build:15 of
msgid ""
"In case of invalid user-provided data (not of type tuple, list, "
"`TensorShape`, or dict). 2. If the model requires call arguments that"
" are agnostic to the input shapes (positional or keyword arg in "
-"call signature). 3. If not all layers were properly built. 4. If "
-"float type inputs are not supported within the layers."
+"call signature). 3. If not all layers were properly built."
+" 4. If float type inputs are not supported within the layers."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.build_from_config:1 of
+msgid "Builds the layer's states with the supplied config dict."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.build_from_config:3 of
+msgid ""
+"By default, this method calls the `build(config[\"input_shape\"])` "
+"method, which creates weights based on the layer's input shape in the "
+"supplied config. If your config contains other information needed to load"
+" the layer's state, you should override this method."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.build_from_config:8 of
+msgid "Dict containing the input shape associated with this layer."
msgstr ""
#: of tensorcircuit.applications.van.MADE.call:1
@@ -4290,24 +4332,10 @@ msgstr ""
#: tensorcircuit.applications.van.PixelCNN.call:15
msgid ""
"A mask or list of masks. A mask can be either a boolean tensor or None "
-"(no mask). For more details, check the guide "
+"(no mask). For more details, check the guide "
"[here](https://www.tensorflow.org/guide/keras/masking_and_padding)."
msgstr ""
-#: of tensorcircuit.applications.van.MADE.call:15
-#: tensorcircuit.applications.van.NMF.call:15
-#: tensorcircuit.applications.van.PixelCNN.call:15
-msgid ""
-"A mask or list of masks. A mask can be either a boolean tensor or None "
-"(no mask). For more details, check the guide"
-msgstr ""
-
-#: of tensorcircuit.applications.van.MADE.call:17
-#: tensorcircuit.applications.van.NMF.call:17
-#: tensorcircuit.applications.van.PixelCNN.call:17
-msgid "[here](https://www.tensorflow.org/guide/keras/masking_and_padding)."
-msgstr ""
-
#: of tensorcircuit.applications.van.MADE.call:19
#: tensorcircuit.applications.van.NMF.call:19
#: tensorcircuit.applications.van.PixelCNN.call:19
@@ -4316,35 +4344,35 @@ msgid ""
"more than one outputs."
msgstr ""
-#: keras.engine.training.Model.compile:1 of
+#: keras.src.engine.training.Model.compile:1 of
msgid "Configures the model for training."
msgstr ""
-#: keras.engine.training.Model.compile:5 of
+#: keras.src.engine.training.Model.compile:5 of
msgid ""
"```python "
"model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),"
msgstr ""
-#: keras.engine.training.Model.compile:7 of
+#: keras.src.engine.training.Model.compile:7 of
msgid ""
"loss=tf.keras.losses.BinaryCrossentropy(), "
"metrics=[tf.keras.metrics.BinaryAccuracy(),"
msgstr ""
-#: keras.engine.training.Model.compile:9 of
+#: keras.src.engine.training.Model.compile:9 of
msgid "tf.keras.metrics.FalseNegatives()])"
msgstr ""
-#: keras.engine.training.Model.compile:12 of
+#: keras.src.engine.training.Model.compile:12 of
msgid ""
"String (name of optimizer) or optimizer instance. See "
"`tf.keras.optimizers`."
msgstr ""
-#: keras.engine.training.Model.compile:14 of
+#: keras.src.engine.training.Model.compile:14 of
msgid ""
-"Loss function. Maybe be a string (name of loss function), or a "
+"Loss function. May be a string (name of loss function), or a "
"`tf.keras.losses.Loss` instance. See `tf.keras.losses`. A loss function "
"is any callable with the signature `loss = fn(y_true, y_pred)`, where "
"`y_true` are the ground truth values, and `y_pred` are the model's "
@@ -4361,7 +4389,7 @@ msgid ""
"individual losses, unless `loss_weights` is specified."
msgstr ""
-#: keras.engine.training.Model.compile:34 of
+#: keras.src.engine.training.Model.compile:34 of
msgid ""
"List of metrics to be evaluated by the model during training and testing."
" Each of this can be a string (name of a built-in function), function or "
@@ -4369,80 +4397,117 @@ msgid ""
"you will use `metrics=['accuracy']`. A function is any callable with the "
"signature `result = fn(y_true, y_pred)`. To specify different metrics for"
" different outputs of a multi-output model, you could also pass a "
-"dictionary, such as `metrics={'output_a': 'accuracy', 'output_b': "
-"['accuracy', 'mse']}`. You can also pass a list to specify a metric or a "
-"list of metrics for each output, such as `metrics=[['accuracy'], "
-"['accuracy', 'mse']]` or `metrics=['accuracy', ['accuracy', 'mse']]`. "
-"When you pass the strings 'accuracy' or 'acc', we convert this to one of "
-"`tf.keras.metrics.BinaryAccuracy`, "
+"dictionary, such as `metrics={'output_a':'accuracy', "
+"'output_b':['accuracy', 'mse']}`. You can also pass a list to specify a "
+"metric or a list of metrics for each output, such as "
+"`metrics=[['accuracy'], ['accuracy', 'mse']]` or `metrics=['accuracy', "
+"['accuracy', 'mse']]`. When you pass the strings 'accuracy' or 'acc', we "
+"convert this to one of `tf.keras.metrics.BinaryAccuracy`, "
"`tf.keras.metrics.CategoricalAccuracy`, "
-"`tf.keras.metrics.SparseCategoricalAccuracy` based on the loss function "
-"used and the model output shape. We do a similar conversion for the "
-"strings 'crossentropy' and 'ce' as well."
+"`tf.keras.metrics.SparseCategoricalAccuracy` based on the shapes of the "
+"targets and of the model output. We do a similar conversion for the "
+"strings 'crossentropy' and 'ce' as well. The metrics passed here are "
+"evaluated without sample weighting; if you would like sample weighting to"
+" apply, you can specify your metrics via the `weighted_metrics` argument "
+"instead."
msgstr ""
-#: keras.engine.training.Model.compile:51 of
+#: keras.src.engine.training.Model.compile:56 of
msgid ""
"Optional list or dictionary specifying scalar coefficients (Python "
"floats) to weight the loss contributions of different model outputs. The "
"loss value that will be minimized by the model will then be the *weighted"
" sum* of all individual losses, weighted by the `loss_weights` "
-"coefficients. If a list, it is expected to have a 1:1 mapping to the "
-"model's outputs. If a dict, it is expected to map output names "
-"(strings) to scalar coefficients."
+"coefficients. If a list, it is expected to have a 1:1 mapping to the "
+"model's outputs. If a dict, it is expected to map output names (strings) "
+"to scalar coefficients."
msgstr ""
-#: keras.engine.training.Model.compile:51 of
+#: keras.src.engine.training.Model.compile:64 of
msgid ""
-"Optional list or dictionary specifying scalar coefficients (Python "
-"floats) to weight the loss contributions of different model outputs. The "
-"loss value that will be minimized by the model will then be the *weighted"
-" sum* of all individual losses, weighted by the `loss_weights` "
-"coefficients."
+"List of metrics to be evaluated and weighted by `sample_weight` or "
+"`class_weight` during training and testing."
msgstr ""
-#: keras.engine.training.Model.compile:57 of
-msgid "If a list, it is expected to have a 1:1 mapping to the model's"
+#: keras.src.engine.training.Model.compile:66 of
+msgid ""
+"Bool. If `True`, this `Model`'s logic will not be wrapped in a "
+"`tf.function`. Recommended to leave this as `None` unless your `Model` "
+"cannot be run inside a `tf.function`. `run_eagerly=True` is not supported"
+" when using `tf.distribute.experimental.ParameterServerStrategy`. "
+"Defaults to `False`."
msgstr ""
-#: keras.engine.training.Model.compile:57 of
+#: keras.src.engine.training.Model.compile:66 of
msgid ""
-"outputs. If a dict, it is expected to map output names (strings) to "
-"scalar coefficients."
+"Bool. If `True`, this `Model`'s logic will not be wrapped in a "
+"`tf.function`. Recommended to leave this as `None` unless your `Model` "
+"cannot be run inside a `tf.function`. `run_eagerly=True` is not supported"
+" when using `tf.distribute.experimental.ParameterServerStrategy`. "
+"Defaults to"
+msgstr ""
+
+#: keras.src.engine.training.Model.compile:71 of
+msgid "`False`."
msgstr ""
-#: keras.engine.training.Model.compile:59 of
+#: keras.src.engine.training.Model.compile:72 of
msgid ""
-"List of metrics to be evaluated and weighted by `sample_weight` or "
-"`class_weight` during training and testing."
+"Int. The number of batches to run during each `tf.function` call. Running"
+" multiple batches inside a single `tf.function` call can greatly improve "
+"performance on TPUs or small models with a large Python overhead. At "
+"most, one full epoch will be run each execution. If a number larger than "
+"the size of the epoch is passed, the execution will be truncated to the "
+"size of the epoch. Note that if `steps_per_execution` is set to `N`, "
+"`Callback.on_batch_begin` and `Callback.on_batch_end` methods will only "
+"be called every `N` batches (i.e. before/after each `tf.function` "
+"execution). Defaults to `1`."
msgstr ""
-#: keras.engine.training.Model.compile:61 of
+#: keras.src.engine.training.Model.compile:82 of
msgid ""
-"Bool. Defaults to `False`. If `True`, this `Model`'s logic will not be "
-"wrapped in a `tf.function`. Recommended to leave this as `None` unless "
-"your `Model` cannot be run inside a `tf.function`. `run_eagerly=True` is "
-"not supported when using "
-"`tf.distribute.experimental.ParameterServerStrategy`."
+"If `True`, compile the model training step with XLA. "
+"[XLA](https://www.tensorflow.org/xla) is an optimizing compiler for "
+"machine learning. `jit_compile` is not enabled for by default. Note that "
+"`jit_compile=True` may not necessarily work for all models. For more "
+"information on supported operations please refer to the [XLA "
+"documentation](https://www.tensorflow.org/xla). Also refer to [known XLA "
+"issues](https://www.tensorflow.org/xla/known_issues) for more details."
msgstr ""
-#: keras.engine.training.Model.compile:66 of
+#: keras.src.engine.training.Model.compile:93 of
msgid ""
-"Int. Defaults to 1. The number of batches to run during each "
-"`tf.function` call. Running multiple batches inside a single "
-"`tf.function` call can greatly improve performance on TPUs or small "
-"models with a large Python overhead. At most, one full epoch will be run "
-"each execution. If a number larger than the size of the epoch is passed, "
-"the execution will be truncated to the size of the epoch. Note that if "
-"`steps_per_execution` is set to `N`, `Callback.on_batch_begin` and "
-"`Callback.on_batch_end` methods will only be called every `N` batches "
-"(i.e. before/after each `tf.function` execution)."
+"Integer or 'auto'. Used for `tf.distribute.ParameterServerStrategy` "
+"training only. This arg sets the number of shards to split the dataset "
+"into, to enable an exact visitation guarantee for evaluation, meaning the"
+" model will be applied to each dataset element exactly once, even if "
+"workers fail. The dataset must be sharded to ensure separate workers do "
+"not process the same data. The number of shards should be at least the "
+"number of workers for good performance. A value of 'auto' turns on exact "
+"evaluation and uses a heuristic for the number of shards based on the "
+"number of workers. 0, meaning no visitation guarantee is provided. NOTE: "
+"Custom implementations of `Model.test_step` will be ignored when doing "
+"exact evaluation. Defaults to `0`."
msgstr ""
-#: keras.engine.training.Model.compile:77 of
+#: keras.src.engine.training.Model.compile:106 of
msgid "Arguments supported for backwards compatibility only."
msgstr ""
+#: keras.src.engine.training.Model.compile_from_config:1 of
+msgid "Compiles the model with the information given in config."
+msgstr ""
+
+#: keras.src.engine.training.Model.compile_from_config:3 of
+msgid ""
+"This method uses the information in the config (optimizer, loss, metrics,"
+" etc.) to compile the model."
+msgstr ""
+
+#: keras.src.engine.training.Model.compile_from_config:6 of
+msgid "Dict containing information for compiling the model."
+msgstr ""
+
#: of tensorcircuit.applications.van.MADE.compute_dtype:1
#: tensorcircuit.applications.van.MaskedConv2D.compute_dtype:1
#: tensorcircuit.applications.van.MaskedLinear.compute_dtype:1
@@ -4513,56 +4578,202 @@ msgstr ""
msgid "The layer's compute dtype."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_mask:1 of
+#: keras.src.engine.training.Model.compute_loss:1 of
+msgid "Compute the total loss, validate it, and return it."
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:3 of
+msgid ""
+"Subclasses can optionally override this method to provide custom loss "
+"computation logic."
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:6 of
+msgid "Example: ```python class MyModel(tf.keras.Model):"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:12 of
+msgid "def __init__(self, *args, **kwargs):"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:11 of
+msgid ""
+"super(MyModel, self).__init__(*args, **kwargs) self.loss_tracker = "
+"tf.keras.metrics.Mean(name='loss')"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:18 of
+msgid "def compute_loss(self, x, y, y_pred, sample_weight):"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:15 of
+msgid ""
+"loss = tf.reduce_mean(tf.math.squared_difference(y_pred, y)) loss += "
+"tf.add_n(self.losses) self.loss_tracker.update_state(loss) return loss"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:21 of
+msgid "def reset_metrics(self):"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:21 of
+msgid "self.loss_tracker.reset_states()"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:23 of
+msgid "@property def metrics(self):"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:25 of
+msgid "return [self.loss_tracker]"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:27 of
+msgid ""
+"tensors = tf.random.uniform((10, 10)), tf.random.uniform((10,)) dataset ="
+" tf.data.Dataset.from_tensor_slices(tensors).repeat().batch(1)"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:30 of
+msgid ""
+"inputs = tf.keras.layers.Input(shape=(10,), name='my_input') outputs = "
+"tf.keras.layers.Dense(10)(inputs) model = MyModel(inputs, outputs) "
+"model.add_loss(tf.reduce_sum(outputs))"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:35 of
+msgid ""
+"optimizer = tf.keras.optimizers.SGD() model.compile(optimizer, "
+"loss='mse', steps_per_execution=10) model.fit(dataset, epochs=2, "
+"steps_per_epoch=10) print('My custom loss: ', "
+"model.loss_tracker.result().numpy()) ```"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:41
+#: keras.src.engine.training.Model.compute_metrics:23 of
+msgid "Input data."
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:42
+#: keras.src.engine.training.Model.compute_metrics:24 of
+msgid "Target data."
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:43 of
+msgid "Predictions returned by the model (output of `model(x)`)"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:44
+#: keras.src.engine.training.Model.compute_metrics:26 of
+msgid "Sample weights for weighting the loss function."
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_loss:46 of
+msgid ""
+"The total loss as a `tf.Tensor`, or `None` if no loss results (which is "
+"the case when called by `Model.test_step`)."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.compute_mask:1 of
msgid "Computes an output mask tensor."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_mask:3
-#: keras.engine.base_layer.Layer.compute_mask:4 of
+#: keras.src.engine.base_layer.Layer.compute_mask:3
+#: keras.src.engine.base_layer.Layer.compute_mask:4 of
msgid "Tensor or list of tensors."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_mask:6 of
+#: keras.src.engine.base_layer.Layer.compute_mask:6 of
msgid ""
"None or a tensor (or list of tensors, one per output tensor of the "
"layer)."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_mask:8 of
+#: keras.src.engine.base_layer.Layer.compute_mask:8 of
msgid "None or a tensor (or list of tensors,"
msgstr ""
-#: keras.engine.base_layer.Layer.compute_mask:9 of
+#: keras.src.engine.base_layer.Layer.compute_mask:9 of
msgid "one per output tensor of the layer)."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_shape:1 of
+#: keras.src.engine.training.Model.compute_metrics:1 of
+msgid "Update metric states and collect all metrics to be returned."
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:3 of
+msgid ""
+"Subclasses can optionally override this method to provide custom metric "
+"updating and collection logic."
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:6 of
+msgid "Example: ```python class MyModel(tf.keras.Sequential):"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:10 of
+msgid "def compute_metrics(self, x, y, y_pred, sample_weight):"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:12 of
+msgid ""
+"# This super call updates `self.compiled_metrics` and returns # results "
+"for all metrics listed in `self.metrics`. metric_results = super(MyModel,"
+" self).compute_metrics("
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:15 of
+msgid "x, y, y_pred, sample_weight)"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:17 of
+msgid ""
+"# Note that `self.custom_metric` is not listed in `self.metrics`. "
+"self.custom_metric.update_state(x, y, y_pred, sample_weight) "
+"metric_results['custom_metric_name'] = self.custom_metric.result() return"
+" metric_results"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:25 of
+msgid "Predictions returned by the model (output of `model.call(x)`)"
+msgstr ""
+
+#: keras.src.engine.training.Model.compute_metrics:28 of
+msgid ""
+"A `dict` containing values that will be passed to "
+"`tf.keras.callbacks.CallbackList.on_train_batch_end()`. Typically, the "
+"values of the metrics listed in `self.metrics` are returned. Example: "
+"`{'loss': 0.2, 'accuracy': 0.7}`."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.compute_output_shape:1 of
msgid "Computes the output shape of the layer."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_shape:3 of
+#: keras.src.engine.base_layer.Layer.compute_output_shape:3 of
msgid ""
-"If the layer has not been built, this method will call `build` on the "
-"layer. This assumes that the layer will later be used with inputs that "
-"match the input shape provided here."
+"This method will cause the layer's state to be built, if that has not "
+"happened before. This requires that the layer will later be used with "
+"inputs that match the input shape provided here."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_shape:7 of
+#: keras.src.engine.base_layer.Layer.compute_output_shape:7 of
msgid ""
-"Shape tuple (tuple of integers) or list of shape tuples (one per output "
-"tensor of the layer). Shape tuples can include None for free dimensions, "
-"instead of an integer."
+"Shape tuple (tuple of integers) or `tf.TensorShape`, or structure of "
+"shape tuples / `tf.TensorShape` instances (one per output tensor of the "
+"layer). Shape tuples can include None for free dimensions, instead of an "
+"integer."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_shape:12 of
-msgid "An input shape tuple."
+#: keras.src.engine.base_layer.Layer.compute_output_shape:13 of
+msgid "A `tf.TensorShape` instance or structure of `tf.TensorShape` instances."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_signature:1 of
+#: keras.src.engine.base_layer.Layer.compute_output_signature:1 of
msgid "Compute the output tensor signature of the layer based on the inputs."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_signature:3 of
+#: keras.src.engine.base_layer.Layer.compute_output_signature:3 of
msgid ""
"Unlike a TensorShape object, a TensorSpec object contains both shape and "
"dtype information for a tensor. This method allows layers to provide "
@@ -4572,44 +4783,59 @@ msgid ""
"matches the input dtype."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_signature:10 of
+#: keras.src.engine.base_layer.Layer.compute_output_signature:10 of
msgid ""
"Single TensorSpec or nested structure of TensorSpec objects, describing a"
" candidate input for the layer."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_signature:13 of
+#: keras.src.engine.base_layer.Layer.compute_output_signature:13 of
msgid ""
-"Single TensorSpec or nested structure of TensorSpec objects, describing"
-" how the layer would transform the provided input."
+"Single TensorSpec or nested structure of TensorSpec objects, describing"
+" how the layer would transform the provided input."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_signature:16 of
-msgid "Single TensorSpec or nested structure of TensorSpec objects, describing"
+#: keras.src.engine.base_layer.Layer.compute_output_signature:16 of
+msgid "Single TensorSpec or nested structure of TensorSpec objects,"
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_signature:16 of
-msgid "how the layer would transform the provided input."
+#: keras.src.engine.base_layer.Layer.compute_output_signature:16 of
+msgid "describing how the layer would transform the provided input."
msgstr ""
-#: keras.engine.base_layer.Layer.compute_output_signature:18 of
+#: keras.src.engine.base_layer.Layer.compute_output_signature:18 of
msgid "If input_signature contains a non-TensorSpec object."
msgstr ""
-#: keras.engine.base_layer.Layer.count_params:1 of
+#: keras.src.engine.base_layer.Layer.count_params:1 of
msgid "Count the total number of scalars composing the weights."
msgstr ""
-#: keras.engine.base_layer.Layer.count_params:3 of
+#: keras.src.engine.base_layer.Layer.count_params:3 of
msgid "An integer count."
msgstr ""
-#: keras.engine.base_layer.Layer.count_params:5 of
+#: keras.src.engine.base_layer.Layer.count_params:5 of
msgid ""
"if the layer isn't yet built (in which case its weights aren't yet "
"defined)."
msgstr ""
+#: of tensorcircuit.applications.van.MADE.distribute_reduction_method:1
+#: tensorcircuit.applications.van.NMF.distribute_reduction_method:1
+#: tensorcircuit.applications.van.PixelCNN.distribute_reduction_method:1
+msgid "The method employed to reduce per-replica values during training."
+msgstr ""
+
+#: of tensorcircuit.applications.van.MADE.distribute_reduction_method:3
+#: tensorcircuit.applications.van.NMF.distribute_reduction_method:3
+#: tensorcircuit.applications.van.PixelCNN.distribute_reduction_method:3
+msgid ""
+"Unless specified, the value \"auto\" will be assumed, indicating that the"
+" reduction strategy should be chosen based on the current running "
+"environment. See `reduce_per_replica` function for more details."
+msgstr ""
+
#: of tensorcircuit.applications.van.MADE.distribute_strategy:1
#: tensorcircuit.applications.van.NMF.distribute_strategy:1
#: tensorcircuit.applications.van.PixelCNN.distribute_strategy:1
@@ -4679,15 +4905,15 @@ msgstr ""
msgid "Whether the layer is dynamic (eager-only); set in the constructor."
msgstr ""
-#: keras.engine.training.Model.evaluate:1 of
+#: keras.src.engine.training.Model.evaluate:1 of
msgid "Returns the loss value & metrics values for the model in test mode."
msgstr ""
-#: keras.engine.training.Model.evaluate:3 of
+#: keras.src.engine.training.Model.evaluate:3 of
msgid "Computation is done in batches (see the `batch_size` arg.)"
msgstr ""
-#: keras.engine.training.Model.evaluate:5 of
+#: keras.src.engine.training.Model.evaluate:5 of
msgid ""
"Input data. It could be: - A Numpy array (or array-like), or a list of "
"arrays (in case the model has multiple inputs). - A TensorFlow tensor, "
@@ -4695,63 +4921,67 @@ msgid ""
"mapping input names to the corresponding array/tensors, if the model "
"has named inputs. - A `tf.data` dataset. Should return a tuple of "
"either `(inputs, targets)` or `(inputs, targets, sample_weights)`. - A "
-"generator or `keras.utils.Sequence` returning `(inputs, targets)` or "
+"generator or `keras.utils.Sequence` returning `(inputs, targets)` or "
"`(inputs, targets, sample_weights)`. A more detailed description of "
"unpacking behavior for iterator types (Dataset, generator, Sequence) is "
"given in the `Unpacking behavior for iterator-like inputs` section of "
"`Model.fit`."
msgstr ""
-#: keras.engine.training.Model.evaluate:5 keras.engine.training.Model.fit:3
-#: keras.engine.training.Model.train_on_batch:3 of
+#: keras.src.engine.training.Model.evaluate:5
+#: keras.src.engine.training.Model.fit:3
+#: keras.src.engine.training.Model.train_on_batch:3 of
msgid ""
"Input data. It could be: - A Numpy array (or array-like), or a list of "
"arrays"
msgstr ""
-#: keras.engine.training.Model.evaluate:7 keras.engine.training.Model.fit:5
-#: keras.engine.training.Model.predict:13
-#: keras.engine.training.Model.train_on_batch:5
-#: keras.engine.training.Model.train_on_batch:7 of
+#: keras.src.engine.training.Model.evaluate:7
+#: keras.src.engine.training.Model.fit:5
+#: keras.src.engine.training.Model.predict:28
+#: keras.src.engine.training.Model.train_on_batch:5
+#: keras.src.engine.training.Model.train_on_batch:7 of
msgid "(in case the model has multiple inputs)."
msgstr ""
-#: keras.engine.training.Model.evaluate:8 keras.engine.training.Model.fit:6
-#: keras.engine.training.Model.predict:14 of
+#: keras.src.engine.training.Model.evaluate:8
+#: keras.src.engine.training.Model.fit:6
+#: keras.src.engine.training.Model.predict:29 of
msgid ""
"A TensorFlow tensor, or a list of tensors (in case the model has multiple"
" inputs)."
msgstr ""
-#: keras.engine.training.Model.evaluate:10 keras.engine.training.Model.fit:8 of
+#: keras.src.engine.training.Model.evaluate:10
+#: keras.src.engine.training.Model.fit:8 of
msgid ""
"A dict mapping input names to the corresponding array/tensors, if the "
"model has named inputs."
msgstr ""
-#: keras.engine.training.Model.evaluate:12 keras.engine.training.Model.fit:10
-#: of
+#: keras.src.engine.training.Model.evaluate:12
+#: keras.src.engine.training.Model.fit:10 of
msgid ""
"A `tf.data` dataset. Should return a tuple of either `(inputs, targets)` "
"or `(inputs, targets, sample_weights)`."
msgstr ""
-#: keras.engine.training.Model.evaluate:15 keras.engine.training.Model.fit:13
-#: of
+#: keras.src.engine.training.Model.evaluate:15
+#: keras.src.engine.training.Model.fit:13 of
msgid ""
"A generator or `keras.utils.Sequence` returning `(inputs, targets)` or "
"`(inputs, targets, sample_weights)`."
msgstr ""
-#: keras.engine.training.Model.evaluate:17
-#: keras.engine.training.Model.predict:18 of
+#: keras.src.engine.training.Model.evaluate:17
+#: keras.src.engine.training.Model.predict:33 of
msgid ""
"A more detailed description of unpacking behavior for iterator types "
"(Dataset, generator, Sequence) is given in the `Unpacking behavior for "
"iterator-like inputs` section of `Model.fit`."
msgstr ""
-#: keras.engine.training.Model.evaluate:20 of
+#: keras.src.engine.training.Model.evaluate:20 of
msgid ""
"Target data. Like the input data `x`, it could be either Numpy array(s) "
"or TensorFlow tensor(s). It should be consistent with `x` (you cannot "
@@ -4760,7 +4990,7 @@ msgid ""
"specified (since targets will be obtained from the iterator/dataset)."
msgstr ""
-#: keras.engine.training.Model.evaluate:26 of
+#: keras.src.engine.training.Model.evaluate:26 of
msgid ""
"Integer or `None`. Number of samples per batch of computation. If "
"unspecified, `batch_size` will default to 32. Do not specify the "
@@ -4768,34 +4998,41 @@ msgid ""
"`keras.utils.Sequence` instances (since they generate batches)."
msgstr ""
-#: keras.engine.training.Model.evaluate:31 of
-msgid "0 or 1. Verbosity mode. 0 = silent, 1 = progress bar."
+#: keras.src.engine.training.Model.evaluate:31
+#: keras.src.engine.training.Model.predict:42 of
+msgid ""
+"`\"auto\"`, 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 ="
+" single line. `\"auto\"` becomes 1 for most cases, and to 2 when used "
+"with `ParameterServerStrategy`. Note that the progress bar is not "
+"particularly useful when logged to a file, so `verbose=2` is recommended "
+"when not running interactively (e.g. in a production environment). "
+"Defaults to 'auto'."
msgstr ""
-#: keras.engine.training.Model.evaluate:32 of
+#: keras.src.engine.training.Model.evaluate:38 of
msgid ""
"Optional Numpy array of weights for the test samples, used for weighting "
"the loss function. You can either pass a flat (1D) Numpy array with the "
"same length as the input samples (1:1 mapping between weights and "
"samples), or in the case of temporal data, you can pass a 2D array "
"with shape `(samples, sequence_length)`, to apply a different weight "
-"to every timestep of every sample. This argument is not supported "
-"when `x` is a dataset, instead pass sample weights as the third "
+"to every timestep of every sample. This argument is not supported "
+"when `x` is a dataset, instead pass sample weights as the third "
"element of `x`."
msgstr ""
-#: keras.engine.training.Model.evaluate:32 of
+#: keras.src.engine.training.Model.evaluate:38 of
msgid ""
"Optional Numpy array of weights for the test samples, used for weighting "
"the loss function. You can either pass a flat (1D) Numpy array with the "
"same length as the input samples"
msgstr ""
-#: keras.engine.training.Model.evaluate:38 of
+#: keras.src.engine.training.Model.evaluate:45 of
msgid "(1:1 mapping between weights and samples), or in the case of"
msgstr ""
-#: keras.engine.training.Model.evaluate:36 of
+#: keras.src.engine.training.Model.evaluate:42 of
msgid ""
"temporal data, you can pass a 2D array with shape `(samples, "
"sequence_length)`, to apply a different weight to every timestep of every"
@@ -4803,7 +5040,7 @@ msgid ""
"pass sample weights as the third element of `x`."
msgstr ""
-#: keras.engine.training.Model.evaluate:40 of
+#: keras.src.engine.training.Model.evaluate:47 of
msgid ""
"Integer or `None`. Total number of steps (batches of samples) before "
"declaring the evaluation round finished. Ignored with the default value "
@@ -4812,30 +5049,33 @@ msgid ""
"with array inputs."
msgstr ""
-#: keras.engine.training.Model.evaluate:45 of
+#: keras.src.engine.training.Model.evaluate:52 of
msgid ""
"List of `keras.callbacks.Callback` instances. List of callbacks to apply "
-"during evaluation. See [callbacks](/api_docs/python/tf/keras/callbacks)."
+"during evaluation. See "
+"[callbacks](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks)."
msgstr ""
-#: keras.engine.training.Model.evaluate:48 keras.engine.training.Model.fit:160
-#: keras.engine.training.Model.predict:36 of
+#: keras.src.engine.training.Model.evaluate:55
+#: keras.src.engine.training.Model.predict:58 of
msgid ""
"Integer. Used for generator or `keras.utils.Sequence` input only. Maximum"
" size for the generator queue. If unspecified, `max_queue_size` will "
"default to 10."
msgstr ""
-#: keras.engine.training.Model.evaluate:51 keras.engine.training.Model.fit:163
-#: keras.engine.training.Model.predict:39 of
+#: keras.src.engine.training.Model.evaluate:58
+#: keras.src.engine.training.Model.fit:178
+#: keras.src.engine.training.Model.predict:62 of
msgid ""
"Integer. Used for generator or `keras.utils.Sequence` input only. Maximum"
" number of processes to spin up when using process-based threading. If "
"unspecified, `workers` will default to 1."
msgstr ""
-#: keras.engine.training.Model.evaluate:54 keras.engine.training.Model.fit:167
-#: keras.engine.training.Model.predict:43 of
+#: keras.src.engine.training.Model.evaluate:62
+#: keras.src.engine.training.Model.fit:182
+#: keras.src.engine.training.Model.predict:66 of
msgid ""
"Boolean. Used for generator or `keras.utils.Sequence` input only. If "
"`True`, use process-based threading. If unspecified, "
@@ -4845,32 +5085,26 @@ msgid ""
"children processes."
msgstr ""
-#: keras.engine.training.Model.evaluate:60
-#: keras.engine.training.Model.test_on_batch:21
-#: keras.engine.training.Model.train_on_batch:25 of
+#: keras.src.engine.training.Model.evaluate:69
+#: keras.src.engine.training.Model.test_on_batch:21
+#: keras.src.engine.training.Model.train_on_batch:27 of
msgid ""
"If `True`, loss and metric results are returned as a dict, with each key "
"being the name of the metric. If `False`, they are returned as a list."
msgstr ""
-#: keras.engine.training.Model.evaluate:63 of
+#: keras.src.engine.training.Model.evaluate:72 of
msgid "Unused at this time."
msgstr ""
-#: keras.engine.training.Model.evaluate:65 of
+#: keras.src.engine.training.Model.evaluate:74 of
msgid ""
"See the discussion of `Unpacking behavior for iterator-like inputs` for "
"`Model.fit`."
msgstr ""
-#: keras.engine.training.Model.evaluate:68 of
-msgid ""
-"`Model.evaluate` is not yet supported with "
-"`tf.distribute.experimental.ParameterServerStrategy`."
-msgstr ""
-
-#: keras.engine.training.Model.evaluate:71
-#: keras.engine.training.Model.test_on_batch:25 of
+#: keras.src.engine.training.Model.evaluate:77
+#: keras.src.engine.training.Model.test_on_batch:25 of
msgid ""
"Scalar test loss (if the model has a single output and no metrics) or "
"list of scalars (if the model has multiple outputs and/or metrics). The "
@@ -4878,42 +5112,89 @@ msgid ""
"scalar outputs."
msgstr ""
-#: keras.engine.training.Model.evaluate:76 of
+#: keras.src.engine.training.Model.evaluate:82 of
msgid "If `model.evaluate` is wrapped in a `tf.function`."
msgstr ""
-#: keras.engine.training.Model.evaluate_generator:1 of
+#: keras.src.engine.training.Model.evaluate_generator:1 of
msgid "Evaluates the model on a data generator."
msgstr ""
-#: keras.engine.training.Model.evaluate_generator:4
-#: keras.engine.training.Model.fit_generator:4
-#: keras.engine.training.Model.predict_generator:4 of
+#: keras.src.engine.training.Model.evaluate_generator:4
+#: keras.src.engine.training.Model.fit_generator:4
+#: keras.src.engine.training.Model.predict_generator:4 of
msgid "DEPRECATED:"
msgstr ""
-#: keras.engine.training.Model.evaluate_generator:4 of
+#: keras.src.engine.training.Model.evaluate_generator:4 of
msgid ""
"`Model.evaluate` now supports generators, so there is no longer any need "
"to use this endpoint."
msgstr ""
-#: keras.engine.base_layer.Layer.finalize_state:1 of
+#: keras.src.engine.training.Model.export:1 of
+msgid "Create a SavedModel artifact for inference (e.g. via TF-Serving)."
+msgstr ""
+
+#: keras.src.engine.training.Model.export:3 of
+msgid ""
+"This method lets you export a model to a lightweight SavedModel artifact "
+"that contains the model's forward pass only (its `call()` method) and can"
+" be served via e.g. TF-Serving. The forward pass is registered under the "
+"name `serve()` (see example below)."
+msgstr ""
+
+#: keras.src.engine.training.Model.export:8 of
+msgid ""
+"The original code of the model (including any custom layers you may have "
+"used) is *no longer* necessary to reload the artifact -- it is entirely "
+"standalone."
+msgstr ""
+
+#: keras.src.engine.training.Model.export:12 of
+msgid "`str` or `pathlib.Path` object. Path where to save the artifact."
+msgstr ""
+
+#: keras.src.engine.training.Model.export:17 of
+msgid "```python # Create the artifact model.export(\"path/to/location\")"
+msgstr ""
+
+#: keras.src.engine.training.Model.export:21 of
+msgid ""
+"# Later, in a different process / environment... reloaded_artifact = "
+"tf.saved_model.load(\"path/to/location\") predictions = "
+"reloaded_artifact.serve(input_data) ```"
+msgstr ""
+
+#: keras.src.engine.training.Model.export:26 of
+msgid ""
+"If you would like to customize your serving endpoints, you can use the "
+"lower-level `keras.export.ExportArchive` class. The `export()` method "
+"relies on `ExportArchive` internally."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.finalize_state:1 of
msgid "Finalizes the layers state after updating layer weights."
msgstr ""
-#: keras.engine.base_layer.Layer.finalize_state:3 of
+#: keras.src.engine.base_layer.Layer.finalize_state:3 of
msgid ""
"This function can be subclassed in a layer and will be called after "
"updating a layer weights. It can be overridden to finalize any additional"
" layer state after a weight update."
msgstr ""
-#: keras.engine.training.Model.fit:1 of
-msgid "Trains the model for a fixed number of epochs (iterations on a dataset)."
+#: keras.src.engine.base_layer.Layer.finalize_state:7 of
+msgid ""
+"This function will be called after weights of a layer have been restored "
+"from a loaded model."
+msgstr ""
+
+#: keras.src.engine.training.Model.fit:1 of
+msgid "Trains the model for a fixed number of epochs (dataset iterations)."
msgstr ""
-#: keras.engine.training.Model.fit:3 of
+#: keras.src.engine.training.Model.fit:3 of
msgid ""
"Input data. It could be: - A Numpy array (or array-like), or a list of "
"arrays (in case the model has multiple inputs). - A TensorFlow tensor, "
@@ -4921,7 +5202,7 @@ msgid ""
"mapping input names to the corresponding array/tensors, if the model "
"has named inputs. - A `tf.data` dataset. Should return a tuple of "
"either `(inputs, targets)` or `(inputs, targets, sample_weights)`. - A "
-"generator or `keras.utils.Sequence` returning `(inputs, targets)` or "
+"generator or `keras.utils.Sequence` returning `(inputs, targets)` or "
"`(inputs, targets, sample_weights)`. - A "
"`tf.keras.utils.experimental.DatasetCreator`, which wraps a callable "
"that takes a single argument of type `tf.distribute.InputContext`, and "
@@ -4929,12 +5210,15 @@ msgid ""
" prefer to specify the per-replica batching and sharding logic for the "
"`Dataset`. See `tf.keras.utils.experimental.DatasetCreator` doc for "
"more information. A more detailed description of unpacking behavior for"
-" iterator types (Dataset, generator, Sequence) is given below. If using "
+" iterator types (Dataset, generator, Sequence) is given below. If these "
+"include `sample_weights` as a third component, note that sample weighting"
+" applies to the `weighted_metrics` argument but not the `metrics` "
+"argument in `compile()`. If using "
"`tf.distribute.experimental.ParameterServerStrategy`, only "
"`DatasetCreator` type is supported for `x`."
msgstr ""
-#: keras.engine.training.Model.fit:15 of
+#: keras.src.engine.training.Model.fit:15 of
msgid ""
"A `tf.keras.utils.experimental.DatasetCreator`, which wraps a callable "
"that takes a single argument of type `tf.distribute.InputContext`, and "
@@ -4944,15 +5228,18 @@ msgid ""
"information."
msgstr ""
-#: keras.engine.training.Model.fit:22 of
+#: keras.src.engine.training.Model.fit:22 of
msgid ""
"A more detailed description of unpacking behavior for iterator types "
-"(Dataset, generator, Sequence) is given below. If using "
+"(Dataset, generator, Sequence) is given below. If these include "
+"`sample_weights` as a third component, note that sample weighting applies"
+" to the `weighted_metrics` argument but not the `metrics` argument in "
+"`compile()`. If using "
"`tf.distribute.experimental.ParameterServerStrategy`, only "
"`DatasetCreator` type is supported for `x`."
msgstr ""
-#: keras.engine.training.Model.fit:26 of
+#: keras.src.engine.training.Model.fit:29 of
msgid ""
"Target data. Like the input data `x`, it could be either Numpy array(s) "
"or TensorFlow tensor(s). It should be consistent with `x` (you cannot "
@@ -4961,7 +5248,7 @@ msgid ""
"specified (since targets will be obtained from `x`)."
msgstr ""
-#: keras.engine.training.Model.fit:32 of
+#: keras.src.engine.training.Model.fit:35 of
msgid ""
"Integer or `None`. Number of samples per gradient update. If unspecified,"
" `batch_size` will default to 32. Do not specify the `batch_size` if your"
@@ -4969,7 +5256,7 @@ msgid ""
"instances (since they generate batches)."
msgstr ""
-#: keras.engine.training.Model.fit:38 of
+#: keras.src.engine.training.Model.fit:41 of
msgid ""
"Integer. Number of epochs to train the model. An epoch is an iteration "
"over the entire `x` and `y` data provided (unless the `steps_per_epoch` "
@@ -4979,16 +5266,17 @@ msgid ""
"merely until the epoch of index `epochs` is reached."
msgstr ""
-#: keras.engine.training.Model.fit:48 of
+#: keras.src.engine.training.Model.fit:51 of
msgid ""
"'auto', 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one"
-" line per epoch. 'auto' defaults to 1 for most cases, but 2 when used "
-"with `ParameterServerStrategy`. Note that the progress bar is not "
-"particularly useful when logged to a file, so verbose=2 is recommended "
-"when not running interactively (eg, in a production environment)."
+" line per epoch. 'auto' becomes 1 for most cases, but 2 when used with "
+"`ParameterServerStrategy`. Note that the progress bar is not particularly"
+" useful when logged to a file, so verbose=2 is recommended when not "
+"running interactively (eg, in a production environment). Defaults to "
+"'auto'."
msgstr ""
-#: keras.engine.training.Model.fit:55 of
+#: keras.src.engine.training.Model.fit:58 of
msgid ""
"List of `keras.callbacks.Callback` instances. List of callbacks to apply "
"during training. See `tf.keras.callbacks`. Note "
@@ -5002,43 +5290,21 @@ msgid ""
"`steps_per_epoch` value."
msgstr ""
-#: keras.engine.training.Model.fit:66 of
-msgid ""
-"Float between 0 and 1. Fraction of the training data to be used as "
-"validation data. The model will set apart this fraction of the training "
-"data, will not train on it, and will evaluate the loss and any model "
-"metrics on this data at the end of each epoch. The validation data is "
-"selected from the last samples in the `x` and `y` data provided, before "
-"shuffling. This argument is not supported when `x` is a dataset, "
-"generator or `keras.utils.Sequence` instance. `validation_split` is not "
-"yet supported with `tf.distribute.experimental.ParameterServerStrategy`."
-msgstr ""
-
-#: keras.engine.training.Model.fit:74 of
-msgid "Float between 0 and 1."
-msgstr ""
-
-#: keras.engine.training.Model.fit:68 of
-msgid ""
-"Fraction of the training data to be used as validation data. The model "
-"will set apart this fraction of the training data, will not train on it, "
-"and will evaluate the loss and any model metrics on this data at the end "
-"of each epoch. The validation data is selected from the last samples in "
-"the `x` and `y` data provided, before shuffling. This argument is not "
-"supported when `x` is a dataset, generator or"
-msgstr ""
-
-#: keras.engine.training.Model.fit:77 of
-msgid "`keras.utils.Sequence` instance."
-msgstr ""
-
-#: keras.engine.training.Model.fit:77 of
+#: keras.src.engine.training.Model.fit:70 of
msgid ""
-"`validation_split` is not yet supported with "
+"Float between 0 and 1. Fraction of the training data to be used as "
+"validation data. The model will set apart this fraction of the training "
+"data, will not train on it, and will evaluate the loss and any model "
+"metrics on this data at the end of each epoch. The validation data is "
+"selected from the last samples in the `x` and `y` data provided, before "
+"shuffling. This argument is not supported when `x` is a dataset, "
+"generator or `keras.utils.Sequence` instance. If both `validation_data` "
+"and `validation_split` are provided, `validation_data` will override "
+"`validation_split`. `validation_split` is not yet supported with "
"`tf.distribute.experimental.ParameterServerStrategy`."
msgstr ""
-#: keras.engine.training.Model.fit:79 of
+#: keras.src.engine.training.Model.fit:84 of
msgid ""
"Data on which to evaluate the loss and any model metrics at the end of "
"each epoch. The model will not be trained on this data. Thus, note the "
@@ -5046,14 +5312,14 @@ msgid ""
"or `validation_data` is not affected by regularization layers like noise "
"and dropout. `validation_data` will override `validation_split`. "
"`validation_data` could be: - A tuple `(x_val, y_val)` of Numpy arrays "
-"or tensors. - A tuple `(x_val, y_val, val_sample_weights)` of NumPy "
-"arrays. - A `tf.data.Dataset`. - A Python generator or "
+"or tensors. - A tuple `(x_val, y_val, val_sample_weights)` of NumPy"
+" arrays. - A `tf.data.Dataset`. - A Python generator or "
"`keras.utils.Sequence` returning `(inputs, targets)` or `(inputs, "
"targets, sample_weights)`. `validation_data` is not yet supported with "
"`tf.distribute.experimental.ParameterServerStrategy`."
msgstr ""
-#: keras.engine.training.Model.fit:79 of
+#: keras.src.engine.training.Model.fit:84 of
msgid ""
"Data on which to evaluate the loss and any model metrics at the end of "
"each epoch. The model will not be trained on this data. Thus, note the "
@@ -5063,33 +5329,33 @@ msgid ""
"`validation_data` could be:"
msgstr ""
-#: keras.engine.training.Model.fit:87 of
+#: keras.src.engine.training.Model.fit:92 of
msgid "A tuple `(x_val, y_val)` of Numpy arrays or tensors."
msgstr ""
-#: keras.engine.training.Model.fit:88 of
+#: keras.src.engine.training.Model.fit:93 of
msgid "A tuple `(x_val, y_val, val_sample_weights)` of NumPy arrays."
msgstr ""
-#: keras.engine.training.Model.fit:89 of
+#: keras.src.engine.training.Model.fit:95 of
msgid "A `tf.data.Dataset`."
msgstr ""
-#: keras.engine.training.Model.fit:90 of
+#: keras.src.engine.training.Model.fit:96 of
msgid "A Python generator or `keras.utils.Sequence` returning"
msgstr ""
-#: keras.engine.training.Model.fit:91 of
+#: keras.src.engine.training.Model.fit:97 of
msgid "`(inputs, targets)` or `(inputs, targets, sample_weights)`."
msgstr ""
-#: keras.engine.training.Model.fit:92 of
+#: keras.src.engine.training.Model.fit:98 of
msgid ""
"`validation_data` is not yet supported with "
"`tf.distribute.experimental.ParameterServerStrategy`."
msgstr ""
-#: keras.engine.training.Model.fit:94 of
+#: keras.src.engine.training.Model.fit:100 of
msgid ""
"Boolean (whether to shuffle the training data before each epoch) or str "
"(for 'batch'). This argument is ignored when `x` is a generator or an "
@@ -5098,57 +5364,39 @@ msgid ""
"effect when `steps_per_epoch` is not `None`."
msgstr ""
-#: keras.engine.training.Model.fit:100 of
+#: keras.src.engine.training.Model.fit:106 of
msgid ""
"Optional dictionary mapping class indices (integers) to a weight (float) "
"value, used for weighting the loss function (during training only). This "
"can be useful to tell the model to \"pay more attention\" to samples from"
-" an under-represented class."
+" an under-represented class. When `class_weight` is specified and targets"
+" have a rank of 2 or greater, either `y` must be one-hot encoded, or an "
+"explicit final dimension of `1` must be included for sparse class labels."
msgstr ""
-#: keras.engine.training.Model.fit:106 of
+#: keras.src.engine.training.Model.fit:115 of
msgid ""
-"Optional Numpy array of weights for the training samples, used for "
-"weighting the loss function (during training only). You can either pass "
-"a flat (1D) Numpy array with the same length as the input samples (1:1 "
-"mapping between weights and samples), or in the case of temporal data, "
-"you can pass a 2D array with shape `(samples, sequence_length)`, to "
-"apply a different weight to every timestep of every sample. This "
-"argument is not supported when `x` is a dataset, generator, or "
-"`keras.utils.Sequence` instance, instead provide the sample_weights as "
-"the third element of `x`."
+"Optional Numpy array of weights for the training samples, used for "
+"weighting the loss function (during training only). You can either pass a"
+" flat (1D) Numpy array with the same length as the input samples (1:1 "
+"mapping between weights and samples), or in the case of temporal data, "
+"you can pass a 2D array with shape `(samples, sequence_length)`, to apply"
+" a different weight to every timestep of every sample. This argument is "
+"not supported when `x` is a dataset, generator, or `keras.utils.Sequence`"
+" instance, instead provide the sample_weights as the third element of "
+"`x`. Note that sample weighting does not apply to metrics specified via "
+"the `metrics` argument in `compile()`. To apply sample weighting to your "
+"metrics, you can specify them via the `weighted_metrics` in `compile()` "
+"instead."
msgstr ""
-#: keras.engine.training.Model.fit:115 of
-msgid "Optional Numpy array of weights for"
-msgstr ""
-
-#: keras.engine.training.Model.fit:108 of
-msgid ""
-"the training samples, used for weighting the loss function (during "
-"training only). You can either pass a flat (1D) Numpy array with the same"
-" length as the input samples (1:1 mapping between weights and samples), "
-"or in the case of temporal data, you can pass a 2D array with shape "
-"`(samples, sequence_length)`, to apply a different weight to every "
-"timestep of every sample. This argument is not supported when `x` is a "
-"dataset, generator, or"
-msgstr ""
-
-#: keras.engine.training.Model.fit:117 of
-msgid "`keras.utils.Sequence` instance, instead provide the sample_weights"
-msgstr ""
-
-#: keras.engine.training.Model.fit:118 of
-msgid "as the third element of `x`."
-msgstr ""
-
-#: keras.engine.training.Model.fit:119 of
+#: keras.src.engine.training.Model.fit:131 of
msgid ""
"Integer. Epoch at which to start training (useful for resuming a previous"
" training run)."
msgstr ""
-#: keras.engine.training.Model.fit:122 of
+#: keras.src.engine.training.Model.fit:134 of
msgid ""
"Integer or `None`. Total number of steps (batches of samples) before "
"declaring one epoch finished and starting the next epoch. When training "
@@ -5156,15 +5404,15 @@ msgid ""
" equal to the number of samples in your dataset divided by the batch "
"size, or 1 if that cannot be determined. If x is a `tf.data` dataset, and"
" 'steps_per_epoch' is None, the epoch will run until the input dataset is"
-" exhausted. When passing an infinitely repeating dataset, you must "
+" exhausted. When passing an infinitely repeating dataset, you must "
"specify the `steps_per_epoch` argument. If `steps_per_epoch=-1` the "
-"training will run indefinitely with an infinitely repeating dataset. This"
-" argument is not supported with array inputs. When using "
+"training will run indefinitely with an infinitely repeating dataset. "
+"This argument is not supported with array inputs. When using "
"`tf.distribute.experimental.ParameterServerStrategy`: * "
"`steps_per_epoch=None` is not supported."
msgstr ""
-#: keras.engine.training.Model.fit:122 of
+#: keras.src.engine.training.Model.fit:134 of
msgid ""
"Integer or `None`. Total number of steps (batches of samples) before "
"declaring one epoch finished and starting the next epoch. When training "
@@ -5172,18 +5420,18 @@ msgid ""
" equal to the number of samples in your dataset divided by the batch "
"size, or 1 if that cannot be determined. If x is a `tf.data` dataset, and"
" 'steps_per_epoch' is None, the epoch will run until the input dataset is"
-" exhausted. When passing an infinitely repeating dataset, you must "
+" exhausted. When passing an infinitely repeating dataset, you must "
"specify the `steps_per_epoch` argument. If `steps_per_epoch=-1` the "
-"training will run indefinitely with an infinitely repeating dataset. This"
-" argument is not supported with array inputs. When using "
+"training will run indefinitely with an infinitely repeating dataset. "
+"This argument is not supported with array inputs. When using "
"`tf.distribute.experimental.ParameterServerStrategy`:"
msgstr ""
-#: keras.engine.training.Model.fit:136 of
+#: keras.src.engine.training.Model.fit:149 of
msgid "`steps_per_epoch=None` is not supported."
msgstr ""
-#: keras.engine.training.Model.fit:137 of
+#: keras.src.engine.training.Model.fit:150 of
msgid ""
"Only relevant if `validation_data` is provided and is a `tf.data` "
"dataset. Total number of steps (batches of samples) to draw before "
@@ -5197,7 +5445,7 @@ msgid ""
"time."
msgstr ""
-#: keras.engine.training.Model.fit:147 of
+#: keras.src.engine.training.Model.fit:161 of
msgid ""
"Integer or `None`. Number of samples per validation batch. If "
"unspecified, will default to `batch_size`. Do not specify the "
@@ -5206,10 +5454,10 @@ msgid ""
"batches)."
msgstr ""
-#: keras.engine.training.Model.fit:153 of
+#: keras.src.engine.training.Model.fit:167 of
msgid ""
"Only relevant if validation data is provided. Integer or "
-"`collections.abc.Container` instance (e.g. list, tuple, etc.). If an "
+"`collections.abc.Container` instance (e.g. list, tuple, etc.). If an "
"integer, specifies how many training epochs to run before a new "
"validation run is performed, e.g. `validation_freq=2` runs validation "
"every 2 epochs. If a Container, specifies the epochs on which to run "
@@ -5217,19 +5465,26 @@ msgid ""
"of the 1st, 2nd, and 10th epochs."
msgstr ""
-#: keras.engine.training.Model.fit:196 of
+#: keras.src.engine.training.Model.fit:175 of
+msgid ""
+"Integer. Used for generator or `keras.utils.Sequence` input only. Maximum"
+" size for the generator queue. If unspecified, `max_queue_size` will "
+"default to 10."
+msgstr ""
+
+#: keras.src.engine.training.Model.fit:214 of
msgid "Unpacking behavior for iterator-like inputs:"
msgstr ""
-#: keras.engine.training.Model.fit:175 of
+#: keras.src.engine.training.Model.fit:191 of
msgid "A common pattern is to pass a tf.data.Dataset, generator, or"
msgstr ""
-#: keras.engine.training.Model.fit:176 of
+#: keras.src.engine.training.Model.fit:192 of
msgid ""
"tf.keras.utils.Sequence to the `x` argument of fit, which will in fact "
"yield not only features (x) but optionally targets (y) and sample "
-"weights. Keras requires that the output of such iterator-likes be "
+"weights. Keras requires that the output of such iterator-likes be "
"unambiguous. The iterator should return a tuple of length 1, 2, or 3, "
"where the optional second and third elements will be used for y and "
"sample_weight respectively. Any other type provided will be wrapped in a "
@@ -5239,31 +5494,31 @@ msgid ""
"features, targets, and weights from the keys of a single dict."
msgstr ""
-#: keras.engine.training.Model.fit:186 of
-msgid "A notable unsupported data type is the namedtuple. The reason is that"
+#: keras.src.engine.training.Model.fit:203 of
+msgid "A notable unsupported data type is the namedtuple. The reason is"
msgstr ""
-#: keras.engine.training.Model.fit:187 of
+#: keras.src.engine.training.Model.fit:204 of
msgid ""
-"it behaves like both an ordered datatype (tuple) and a mapping datatype "
-"(dict). So given a namedtuple of the form:"
+"that it behaves like both an ordered datatype (tuple) and a mapping "
+"datatype (dict). So given a namedtuple of the form:"
msgstr ""
-#: keras.engine.training.Model.fit:189 of
+#: keras.src.engine.training.Model.fit:206 of
msgid "`namedtuple(\"example_tuple\", [\"y\", \"x\"])`"
msgstr ""
-#: keras.engine.training.Model.fit:190 of
+#: keras.src.engine.training.Model.fit:207 of
msgid ""
"it is ambiguous whether to reverse the order of the elements when "
"interpreting the value. Even worse is a tuple of the form:"
msgstr ""
-#: keras.engine.training.Model.fit:192 of
+#: keras.src.engine.training.Model.fit:209 of
msgid "`namedtuple(\"other_tuple\", [\"x\", \"y\", \"z\"])`"
msgstr ""
-#: keras.engine.training.Model.fit:193 of
+#: keras.src.engine.training.Model.fit:210 of
msgid ""
"where it is unclear if the tuple was intended to be unpacked into x, y, "
"and sample_weight or passed through as a single element to `x`. As a "
@@ -5271,44 +5526,44 @@ msgid ""
"encounters a namedtuple. (Along with instructions to remedy the issue.)"
msgstr ""
-#: keras.engine.training.Model.fit:198 of
+#: keras.src.engine.training.Model.fit:216 of
msgid ""
"A `History` object. Its `History.history` attribute is a record of "
"training loss values and metrics values at successive epochs, as well as "
"validation loss values and validation metrics values (if applicable)."
msgstr ""
-#: keras.engine.training.Model.fit:203 of
+#: keras.src.engine.training.Model.fit:221 of
msgid "1. If the model was never compiled or,"
msgstr ""
-#: keras.engine.training.Model.fit:203 of
+#: keras.src.engine.training.Model.fit:221 of
msgid "If the model was never compiled or,"
msgstr ""
-#: keras.engine.training.Model.fit:205 of
+#: keras.src.engine.training.Model.fit:223 of
msgid ""
"In case of mismatch between the provided input data and what the "
"model expects or when the input data is empty."
msgstr ""
-#: keras.engine.training.Model.fit_generator:1 of
+#: keras.src.engine.training.Model.fit_generator:1 of
msgid "Fits the model on data yielded batch-by-batch by a Python generator."
msgstr ""
-#: keras.engine.training.Model.fit_generator:4 of
+#: keras.src.engine.training.Model.fit_generator:4 of
msgid ""
"`Model.fit` now supports generators, so there is no longer any need to "
"use this endpoint."
msgstr ""
-#: keras.engine.base_layer.Layer.from_config:1
-#: keras.engine.training.Model.from_config:1 of
+#: keras.src.engine.base_layer.Layer.from_config:1
+#: keras.src.engine.training.Model.from_config:1 of
msgid "Creates a layer from its config."
msgstr ""
-#: keras.engine.base_layer.Layer.from_config:3
-#: keras.engine.training.Model.from_config:3 of
+#: keras.src.engine.base_layer.Layer.from_config:3
+#: keras.src.engine.training.Model.from_config:3 of
msgid ""
"This method is the reverse of `get_config`, capable of instantiating the "
"same layer from the config dictionary. It does not handle layer "
@@ -5316,69 +5571,106 @@ msgid ""
"`set_weights`)."
msgstr ""
-#: keras.engine.base_layer.Layer.from_config:8
-#: keras.engine.training.Model.from_config:8 of
+#: keras.src.engine.base_layer.Layer.from_config:8
+#: keras.src.engine.training.Model.from_config:8 of
msgid "A Python dictionary, typically the output of get_config."
msgstr ""
-#: keras.engine.base_layer.Layer.from_config:11
-#: keras.engine.training.Model.from_config:11
-#: keras.engine.training.Model.get_layer:9 of
+#: keras.src.engine.base_layer.Layer.from_config:11
+#: keras.src.engine.training.Model.from_config:11
+#: keras.src.engine.training.Model.get_layer:9 of
msgid "A layer instance."
msgstr ""
-#: keras.engine.base_layer.Layer.get_config:1
-#: keras.engine.training.Model.get_config:1 of
-msgid "Returns the config of the layer."
+#: keras.src.engine.base_layer.Layer.get_build_config:1 of
+msgid "Returns a dictionary with the layer's input shape."
msgstr ""
-#: keras.engine.base_layer.Layer.get_config:3
-#: keras.engine.training.Model.get_config:3 of
+#: keras.src.engine.base_layer.Layer.get_build_config:3 of
msgid ""
-"A layer config is a Python dictionary (serializable) containing the "
-"configuration of a layer. The same layer can be reinstantiated later "
-"(without its trained weights) from this configuration."
+"This method returns a config dict that can be used by "
+"`build_from_config(config)` to create all states (e.g. Variables and "
+"Lookup tables) needed by the layer."
msgstr ""
-#: keras.engine.base_layer.Layer.get_config:8
-#: keras.engine.training.Model.get_config:8 of
+#: keras.src.engine.base_layer.Layer.get_build_config:7 of
msgid ""
-"The config of a layer does not include connectivity information, nor the "
-"layer class name. These are handled by `Network` (one layer of "
-"abstraction above)."
+"By default, the config only contains the input shape that the layer was "
+"built with. If you're writing a custom layer that creates state in an "
+"unusual way, you should override this method to make sure this state is "
+"already created when Keras attempts to load its value upon model loading."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.get_build_config:13 of
+msgid "A dict containing the input shape associated with the layer."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_compile_config:1 of
+msgid "Returns a serialized config with information for compiling the model."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_compile_config:3 of
+msgid ""
+"This method returns a config dictionary containing all the information "
+"(optimizer, loss, metrics, etc.) with which the model was compiled."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_compile_config:6 of
+msgid "A dict containing information for compiling the model."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_config:1 of
+msgid "Returns the config of the `Model`."
msgstr ""
-#: keras.engine.base_layer.Layer.get_config:12
-#: keras.engine.training.Model.get_config:12 of
+#: keras.src.engine.training.Model.get_config:3 of
+msgid ""
+"Config is a Python dictionary (serializable) containing the configuration"
+" of an object, which in this case is a `Model`. This allows the `Model` "
+"to be be reinstantiated later (without its trained weights) from this "
+"configuration."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.get_config:12
+#: keras.src.engine.training.Model.get_config:8 of
msgid ""
"Note that `get_config()` does not guarantee to return a fresh copy of "
"dict every time it is called. The callers should make a copy of the "
"returned dict if they want to modify it."
msgstr ""
-#: keras.engine.base_layer.Layer.get_config:16
-#: keras.engine.training.Model.get_config:16 of
-msgid "Python dictionary."
+#: keras.src.engine.training.Model.get_config:12 of
+msgid ""
+"Developers of subclassed `Model` are advised to override this method, and"
+" continue to update the dict from `super(MyModel, self).get_config()` to "
+"provide the proper configuration of this `Model`. The default config will"
+" return config dict for init parameters if they are basic types. Raises "
+"`NotImplementedError` when in cases where a custom `get_config()` "
+"implementation is required for the subclassed model."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_config:19 of
+msgid "Python dictionary containing the configuration of this `Model`."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_at:1 of
+#: keras.src.engine.base_layer.Layer.get_input_at:1 of
msgid "Retrieves the input tensor(s) of a layer at a given node."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_at:3 of
+#: keras.src.engine.base_layer.Layer.get_input_at:3 of
msgid ""
"Integer, index of the node from which to retrieve the attribute. E.g. "
"`node_index=0` will correspond to the first input node of the layer."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_at:8 of
+#: keras.src.engine.base_layer.Layer.get_input_at:8 of
msgid "A tensor (or list of tensors if the layer has multiple inputs)."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_at:10
-#: keras.engine.base_layer.Layer.get_input_shape_at:11
-#: keras.engine.base_layer.Layer.get_output_at:10
-#: keras.engine.base_layer.Layer.get_output_shape_at:11 of
+#: keras.src.engine.base_layer.Layer.get_input_at:10
+#: keras.src.engine.base_layer.Layer.get_input_shape_at:11
+#: keras.src.engine.base_layer.Layer.get_output_at:10
+#: keras.src.engine.base_layer.Layer.get_output_shape_at:11 of
#: tensorcircuit.applications.van.MADE.input:8
#: tensorcircuit.applications.van.MaskedConv2D.input:8
#: tensorcircuit.applications.van.MaskedLinear.input:8
@@ -5391,127 +5683,187 @@ msgstr ""
msgid "If called in Eager mode."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_mask_at:1 of
+#: keras.src.engine.base_layer.Layer.get_input_mask_at:1 of
msgid "Retrieves the input mask tensor(s) of a layer at a given node."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_mask_at:3
-#: keras.engine.base_layer.Layer.get_input_shape_at:3
-#: keras.engine.base_layer.Layer.get_output_mask_at:3
-#: keras.engine.base_layer.Layer.get_output_shape_at:3 of
+#: keras.src.engine.base_layer.Layer.get_input_mask_at:3
+#: keras.src.engine.base_layer.Layer.get_input_shape_at:3
+#: keras.src.engine.base_layer.Layer.get_output_mask_at:3
+#: keras.src.engine.base_layer.Layer.get_output_shape_at:3 of
msgid ""
"Integer, index of the node from which to retrieve the attribute. E.g. "
"`node_index=0` will correspond to the first time the layer was called."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_mask_at:8 of
+#: keras.src.engine.base_layer.Layer.get_input_mask_at:8 of
msgid "A mask tensor (or list of tensors if the layer has multiple inputs)."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_shape_at:1 of
+#: keras.src.engine.base_layer.Layer.get_input_shape_at:1 of
msgid "Retrieves the input shape(s) of a layer at a given node."
msgstr ""
-#: keras.engine.base_layer.Layer.get_input_shape_at:8 of
+#: keras.src.engine.base_layer.Layer.get_input_shape_at:8 of
msgid "A shape tuple (or list of shape tuples if the layer has multiple inputs)."
msgstr ""
-#: keras.engine.training.Model.get_layer:1 of
+#: keras.src.engine.training.Model.get_layer:1 of
msgid "Retrieves a layer based on either its name (unique) or index."
msgstr ""
-#: keras.engine.training.Model.get_layer:3 of
+#: keras.src.engine.training.Model.get_layer:3 of
msgid ""
"If `name` and `index` are both provided, `index` will take precedence. "
"Indices are based on order of horizontal graph traversal (bottom-up)."
msgstr ""
-#: keras.engine.training.Model.get_layer:6 of
+#: keras.src.engine.training.Model.get_layer:6 of
msgid "String, name of layer."
msgstr ""
-#: keras.engine.training.Model.get_layer:7 of
+#: keras.src.engine.training.Model.get_layer:7 of
msgid "Integer, index of layer."
msgstr ""
-#: keras.engine.base_layer.Layer.get_losses_for:3 of
-msgid "Retrieves losses relevant to a specific set of inputs."
+#: keras.src.engine.training.Model.get_metrics_result:1 of
+msgid "Returns the model's metrics values as a dict."
msgstr ""
-#: keras.engine.base_layer.Layer.get_losses_for:5
-#: keras.engine.base_layer.Layer.get_updates_for:5 of
-msgid "Input tensor or list/tuple of input tensors."
+#: keras.src.engine.training.Model.get_metrics_result:3 of
+msgid ""
+"If any of the metric result is a dict (containing multiple metrics), each"
+" of them gets added to the top level returned dict of this method."
msgstr ""
-#: keras.engine.base_layer.Layer.get_losses_for:7 of
-msgid "List of loss tensors of the layer that depend on `inputs`."
+#: keras.src.engine.training.Model.get_metrics_result:6 of
+msgid ""
+"A `dict` containing values of the metrics listed in `self.metrics`. "
+"Example: `{'loss': 0.2, 'accuracy': 0.7}`."
msgstr ""
-#: keras.engine.base_layer.Layer.get_output_at:1 of
+#: keras.src.engine.base_layer.Layer.get_output_at:1 of
msgid "Retrieves the output tensor(s) of a layer at a given node."
msgstr ""
-#: keras.engine.base_layer.Layer.get_output_at:3 of
+#: keras.src.engine.base_layer.Layer.get_output_at:3 of
msgid ""
"Integer, index of the node from which to retrieve the attribute. E.g. "
"`node_index=0` will correspond to the first output node of the layer."
msgstr ""
-#: keras.engine.base_layer.Layer.get_output_at:8 of
+#: keras.src.engine.base_layer.Layer.get_output_at:8 of
msgid "A tensor (or list of tensors if the layer has multiple outputs)."
msgstr ""
-#: keras.engine.base_layer.Layer.get_output_mask_at:1 of
+#: keras.src.engine.base_layer.Layer.get_output_mask_at:1 of
msgid "Retrieves the output mask tensor(s) of a layer at a given node."
msgstr ""
-#: keras.engine.base_layer.Layer.get_output_mask_at:8 of
+#: keras.src.engine.base_layer.Layer.get_output_mask_at:8 of
msgid "A mask tensor (or list of tensors if the layer has multiple outputs)."
msgstr ""
-#: keras.engine.base_layer.Layer.get_output_shape_at:1 of
+#: keras.src.engine.base_layer.Layer.get_output_shape_at:1 of
msgid "Retrieves the output shape(s) of a layer at a given node."
msgstr ""
-#: keras.engine.base_layer.Layer.get_output_shape_at:8 of
+#: keras.src.engine.base_layer.Layer.get_output_shape_at:8 of
msgid "A shape tuple (or list of shape tuples if the layer has multiple outputs)."
msgstr ""
-#: keras.engine.base_layer.Layer.get_updates_for:3 of
-msgid "Retrieves updates relevant to a specific set of inputs."
+#: keras.src.engine.training.Model.get_weight_paths:1 of
+msgid "Retrieve all the variables and their paths for the model."
msgstr ""
-#: keras.engine.base_layer.Layer.get_updates_for:7 of
-msgid "List of update ops of the layer that depend on `inputs`."
+#: keras.src.engine.training.Model.get_weight_paths:3 of
+msgid ""
+"The variable path (string) is a stable key to identify a `tf.Variable` "
+"instance owned by the model. It can be used to specify variable-specific "
+"configurations (e.g. DTensor, quantization) from a global view."
msgstr ""
-#: keras.engine.training.Model.get_weights:1 of
-msgid "Retrieves the weights of the model."
+#: keras.src.engine.training.Model.get_weight_paths:7 of
+msgid ""
+"This method returns a dict with weight object paths as keys and the "
+"corresponding `tf.Variable` instances as values."
msgstr ""
-#: keras.engine.training.Model.get_weights:3 of
-msgid "A flat list of Numpy arrays."
+#: keras.src.engine.training.Model.get_weight_paths:10 of
+msgid ""
+"Note that if the model is a subclassed model and the weights haven't been"
+" initialized, an empty dict will be returned."
msgstr ""
-#: of tensorcircuit.applications.van.MADE.inbound_nodes:1
-#: tensorcircuit.applications.van.MADE.outbound_nodes:1
-#: tensorcircuit.applications.van.MaskedConv2D.inbound_nodes:1
-#: tensorcircuit.applications.van.MaskedConv2D.outbound_nodes:1
-#: tensorcircuit.applications.van.MaskedLinear.inbound_nodes:1
-#: tensorcircuit.applications.van.MaskedLinear.outbound_nodes:1
-#: tensorcircuit.applications.van.NMF.inbound_nodes:1
-#: tensorcircuit.applications.van.NMF.outbound_nodes:1
-#: tensorcircuit.applications.van.PixelCNN.inbound_nodes:1
-#: tensorcircuit.applications.van.PixelCNN.outbound_nodes:1
-#: tensorcircuit.applications.van.ResidualBlock.inbound_nodes:1
-#: tensorcircuit.applications.van.ResidualBlock.outbound_nodes:1
-#: tensorcircuit.applications.vqes.Linear.inbound_nodes:1
-#: tensorcircuit.applications.vqes.Linear.outbound_nodes:1
-#: tensorcircuit.keras.HardwareLayer.inbound_nodes:1
-#: tensorcircuit.keras.HardwareLayer.outbound_nodes:1
-#: tensorcircuit.keras.QuantumLayer.inbound_nodes:1
-#: tensorcircuit.keras.QuantumLayer.outbound_nodes:1
-msgid "Deprecated, do NOT use! Only for compatibility with external Keras."
+#: keras.src.engine.training.Model.get_weight_paths:13 of
+msgid ""
+"A dict where keys are variable paths and values are `tf.Variable` "
+"instances."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:16 of
+msgid "A dict where keys are variable paths and values are `tf.Variable`"
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:16 of
+msgid "instances."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:20 of
+msgid "```python class SubclassModel(tf.keras.Model):"
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:26 of
+msgid "def __init__(self, name=None):"
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:24 of
+msgid ""
+"super().__init__(name=name) self.d1 = tf.keras.layers.Dense(10) self.d2 ="
+" tf.keras.layers.Dense(20)"
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:29 of
+msgid "x = self.d1(inputs) return self.d2(x)"
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:32 of
+msgid ""
+"model = SubclassModel() model(tf.zeros((10, 10))) weight_paths = "
+"model.get_weight_paths() # weight_paths: # { # 'd1.kernel': "
+"model.d1.kernel, # 'd1.bias': model.d1.bias, # 'd2.kernel': "
+"model.d2.kernel, # 'd2.bias': model.d2.bias, # }"
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weight_paths:43 of
+msgid ""
+"# Functional model inputs = tf.keras.Input((10,), batch_size=10) x = "
+"tf.keras.layers.Dense(20, name='d1')(inputs) output = "
+"tf.keras.layers.Dense(30, name='d2')(x) model = tf.keras.Model(inputs, "
+"output) d1 = model.layers[1] d2 = model.layers[2] weight_paths = "
+"model.get_weight_paths() # weight_paths: # { # 'd1.kernel': d1.kernel,"
+" # 'd1.bias': d1.bias, # 'd2.kernel': d2.kernel, # 'd2.bias': "
+"d2.bias, # } ```"
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weights:1 of
+msgid "Retrieves the weights of the model."
+msgstr ""
+
+#: keras.src.engine.training.Model.get_weights:3 of
+msgid "A flat list of Numpy arrays."
+msgstr ""
+
+#: of tensorcircuit.applications.van.MADE.inbound_nodes:1
+#: tensorcircuit.applications.van.MaskedConv2D.inbound_nodes:1
+#: tensorcircuit.applications.van.MaskedLinear.inbound_nodes:1
+#: tensorcircuit.applications.van.NMF.inbound_nodes:1
+#: tensorcircuit.applications.van.PixelCNN.inbound_nodes:1
+#: tensorcircuit.applications.van.ResidualBlock.inbound_nodes:1
+#: tensorcircuit.applications.vqes.Linear.inbound_nodes:1
+#: tensorcircuit.keras.HardwareLayer.inbound_nodes:1
+#: tensorcircuit.keras.QuantumLayer.inbound_nodes:1
+msgid "Return Functional API nodes upstream of this layer."
msgstr ""
#: of tensorcircuit.applications.van.MADE.input:1
@@ -5822,92 +6174,125 @@ msgstr ""
msgid "A `tf.keras.layers.InputSpec` instance, or nested structure thereof."
msgstr ""
-#: keras.engine.training.Model.load_weights:1 of
-msgid "Loads all layer weights, either from a TensorFlow or an HDF5 weight file."
+#: of tensorcircuit.applications.van.MADE.jit_compile:1
+#: tensorcircuit.applications.van.NMF.jit_compile:1
+#: tensorcircuit.applications.van.PixelCNN.jit_compile:1
+msgid "Specify whether to compile the model with XLA."
msgstr ""
-#: keras.engine.training.Model.load_weights:3 of
+#: of tensorcircuit.applications.van.MADE.jit_compile:3
+#: tensorcircuit.applications.van.NMF.jit_compile:3
+#: tensorcircuit.applications.van.PixelCNN.jit_compile:3
msgid ""
-"If `by_name` is False weights are loaded based on the network's topology."
-" This means the architecture should be the same as when the weights were "
-"saved. Note that layers that don't have weights are not taken into "
-"account in the topological ordering, so adding or removing layers is fine"
-" as long as they don't have weights."
+"[XLA](https://www.tensorflow.org/xla) is an optimizing compiler for "
+"machine learning. `jit_compile` is not enabled by default. Note that "
+"`jit_compile=True` may not necessarily work for all models."
msgstr ""
-#: keras.engine.training.Model.load_weights:9 of
+#: of tensorcircuit.applications.van.MADE.jit_compile:7
+#: tensorcircuit.applications.van.NMF.jit_compile:7
+#: tensorcircuit.applications.van.PixelCNN.jit_compile:7
msgid ""
-"If `by_name` is True, weights are loaded into layers only if they share "
-"the same name. This is useful for fine-tuning or transfer-learning models"
-" where some of the layers have changed."
+"For more information on supported operations please refer to the [XLA "
+"documentation](https://www.tensorflow.org/xla). Also refer to [known XLA "
+"issues](https://www.tensorflow.org/xla/known_issues) for more details."
msgstr ""
-#: keras.engine.training.Model.load_weights:13 of
+#: keras.src.engine.base_layer.Layer.load_own_variables:1 of
+msgid "Loads the state of the layer."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.load_own_variables:3 of
msgid ""
-"Only topological loading (`by_name=False`) is supported when loading "
-"weights from the TensorFlow format. Note that topological loading differs"
-" slightly between TensorFlow and HDF5 formats for user-defined classes "
-"inheriting from `tf.keras.Model`: HDF5 loads based on a flattened list of"
-" weights, while the TensorFlow format loads based on the object-local "
-"names of attributes to which layers are assigned in the `Model`'s "
-"constructor."
+"You can override this method to take full control of how the state of the"
+" layer is loaded upon calling `keras.models.load_model()`."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.load_own_variables:6 of
+msgid "Dict from which the state of the model will be loaded."
msgstr ""
-#: keras.engine.training.Model.load_weights:20 of
+#: keras.src.engine.training.Model.load_weights:1 of
+msgid "Loads all layer weights from a saved files."
+msgstr ""
+
+#: keras.src.engine.training.Model.load_weights:3 of
msgid ""
-"String, path to the weights file to load. For weight files in TensorFlow "
-"format, this is the file prefix (the same as was passed to "
-"`save_weights`). This can also be a path to a SavedModel saved from "
-"`model.save`."
+"The saved file could be a SavedModel file, a `.keras` file (v3 saving "
+"format), or a file created via `model.save_weights()`."
msgstr ""
-#: keras.engine.training.Model.load_weights:24 of
+#: keras.src.engine.training.Model.load_weights:6 of
msgid ""
-"Boolean, whether to load weights by name or by topological order. Only "
-"topological loading is supported for weight files in TensorFlow format."
+"By default, weights are loaded based on the network's topology. This "
+"means the architecture should be the same as when the weights were saved."
+" Note that layers that don't have weights are not taken into account in "
+"the topological ordering, so adding or removing layers is fine as long as"
+" they don't have weights."
+msgstr ""
+
+#: keras.src.engine.training.Model.load_weights:12 of
+msgid "**Partial weight loading**"
msgstr ""
-#: keras.engine.training.Model.load_weights:27 of
+#: keras.src.engine.training.Model.load_weights:14 of
msgid ""
-"Boolean, whether to skip loading of layers where there is a mismatch in "
-"the number of weights, or a mismatch in the shape of the weight (only "
-"valid when `by_name=True`)."
+"If you have modified your model, for instance by adding a new layer (with"
+" weights) or by changing the shape of the weights of a layer, you can "
+"choose to ignore errors and continue loading by setting "
+"`skip_mismatch=True`. In this case any layer with mismatching weights "
+"will be skipped. A warning will be displayed for each skipped layer."
msgstr ""
-#: keras.engine.training.Model.load_weights:30 of
+#: keras.src.engine.training.Model.load_weights:21 of
+msgid "**Weight loading by name**"
+msgstr ""
+
+#: keras.src.engine.training.Model.load_weights:23 of
msgid ""
-"Optional `tf.train.CheckpointOptions` object that specifies options for "
-"loading weights."
+"If your weights are saved as a `.h5` file created via "
+"`model.save_weights()`, you can use the argument `by_name=True`."
+msgstr ""
+
+#: keras.src.engine.training.Model.load_weights:26 of
+msgid ""
+"In this case, weights are loaded into layers only if they share the same "
+"name. This is useful for fine-tuning or transfer-learning models where "
+"some of the layers have changed."
msgstr ""
-#: keras.engine.training.Model.load_weights:33 of
+#: keras.src.engine.training.Model.load_weights:30 of
msgid ""
-"When loading a weight file in TensorFlow format, returns the same status "
-"object as `tf.train.Checkpoint.restore`. When graph building, restore ops"
-" are run automatically as soon as the network is built (on first call for"
-" user-defined classes inheriting from `Model`, immediately if it is "
-"already built). When loading weights in HDF5 format, returns `None`."
+"Note that only topological loading (`by_name=False`) is supported when "
+"loading weights from the `.keras` v3 format or from the TensorFlow "
+"SavedModel format."
msgstr ""
-#: keras.engine.training.Model.load_weights:33 of
+#: keras.src.engine.training.Model.load_weights:34 of
msgid ""
-"When loading a weight file in TensorFlow format, returns the same status "
-"object as `tf.train.Checkpoint.restore`. When graph building, restore ops"
-" are run automatically as soon as the network is built (on first call for"
-" user-defined classes inheriting from `Model`, immediately if it is "
-"already built)."
+"String, path to the weights file to load. For weight files in TensorFlow "
+"format, this is the file prefix (the same as was passed to "
+"`save_weights()`). This can also be a path to a SavedModel or a `.keras` "
+"file (v3 saving format) saved via `model.save()`."
msgstr ""
-#: keras.engine.training.Model.load_weights:39 of
-msgid "When loading weights in HDF5 format, returns `None`."
+#: keras.src.engine.training.Model.load_weights:39 of
+msgid ""
+"Boolean, whether to skip loading of layers where there is a mismatch in "
+"the number of weights, or a mismatch in the shape of the weights."
msgstr ""
-#: keras.engine.training.Model.load_weights:41 of
-msgid "If `h5py` is not available and the weight file is in HDF5 format."
+#: keras.src.engine.training.Model.load_weights:42 of
+msgid ""
+"Boolean, whether to load weights by name or by topological order. Only "
+"topological loading is supported for weight files in the `.keras` v3 "
+"format or in the TensorFlow SavedModel format."
msgstr ""
-#: keras.engine.training.Model.load_weights:42 of
-msgid "If `skip_mismatch` is set to `True` when `by_name` is `False`."
+#: keras.src.engine.training.Model.load_weights:45 of
+msgid ""
+"Optional `tf.train.CheckpointOptions` object that specifies options for "
+"loading weights (only valid for a SavedModel file)."
msgstr ""
#: of tensorcircuit.applications.van.MADE.losses:1
@@ -5938,55 +6323,55 @@ msgid ""
"variables."
msgstr ""
-#: keras.engine.training.Model.reset_metrics:3 of
-#: tensorcircuit.applications.van.MADE.losses:7
+#: keras.src.engine.training.Model.reset_metrics:3 of
+#: tensorcircuit.applications.van.MADE.losses:8
#: tensorcircuit.applications.van.MADE.metrics:6
#: tensorcircuit.applications.van.MADE.metrics_names:6
-#: tensorcircuit.applications.van.MaskedConv2D.losses:7
-#: tensorcircuit.applications.van.MaskedLinear.losses:7
-#: tensorcircuit.applications.van.NMF.losses:7
+#: tensorcircuit.applications.van.MaskedConv2D.losses:8
+#: tensorcircuit.applications.van.MaskedLinear.losses:8
+#: tensorcircuit.applications.van.NMF.losses:8
#: tensorcircuit.applications.van.NMF.metrics:6
#: tensorcircuit.applications.van.NMF.metrics_names:6
-#: tensorcircuit.applications.van.PixelCNN.losses:7
+#: tensorcircuit.applications.van.PixelCNN.losses:8
#: tensorcircuit.applications.van.PixelCNN.metrics:6
#: tensorcircuit.applications.van.PixelCNN.metrics_names:6
-#: tensorcircuit.applications.van.ResidualBlock.losses:7
-#: tensorcircuit.applications.vqes.Linear.losses:7
-#: tensorcircuit.keras.HardwareLayer.losses:7
-#: tensorcircuit.keras.QuantumLayer.losses:7
+#: tensorcircuit.applications.van.ResidualBlock.losses:8
+#: tensorcircuit.applications.vqes.Linear.losses:8
+#: tensorcircuit.keras.HardwareLayer.losses:8
+#: tensorcircuit.keras.QuantumLayer.losses:8
msgid "Examples:"
msgstr ""
-#: of tensorcircuit.applications.van.MADE.losses:39
-#: tensorcircuit.applications.van.MaskedConv2D.losses:39
-#: tensorcircuit.applications.van.MaskedLinear.losses:39
-#: tensorcircuit.applications.van.NMF.losses:39
-#: tensorcircuit.applications.van.PixelCNN.losses:39
-#: tensorcircuit.applications.van.ResidualBlock.losses:39
-#: tensorcircuit.applications.vqes.Linear.losses:39
-#: tensorcircuit.keras.HardwareLayer.losses:39
-#: tensorcircuit.keras.QuantumLayer.losses:39
+#: of tensorcircuit.applications.van.MADE.losses:40
+#: tensorcircuit.applications.van.MaskedConv2D.losses:40
+#: tensorcircuit.applications.van.MaskedLinear.losses:40
+#: tensorcircuit.applications.van.NMF.losses:40
+#: tensorcircuit.applications.van.PixelCNN.losses:40
+#: tensorcircuit.applications.van.ResidualBlock.losses:40
+#: tensorcircuit.applications.vqes.Linear.losses:40
+#: tensorcircuit.keras.HardwareLayer.losses:40
+#: tensorcircuit.keras.QuantumLayer.losses:40
msgid "A list of tensors."
msgstr ""
-#: keras.engine.training.Model.make_predict_function:1 of
+#: keras.src.engine.training.Model.make_predict_function:1 of
msgid "Creates a function that executes one step of inference."
msgstr ""
-#: keras.engine.training.Model.make_predict_function:3 of
+#: keras.src.engine.training.Model.make_predict_function:3 of
msgid ""
"This method can be overridden to support custom inference logic. This "
"method is called by `Model.predict` and `Model.predict_on_batch`."
msgstr ""
-#: keras.engine.training.Model.make_predict_function:6 of
+#: keras.src.engine.training.Model.make_predict_function:6 of
msgid ""
"Typically, this method directly controls `tf.function` and "
"`tf.distribute.Strategy` settings, and delegates the actual evaluation "
"logic to `Model.predict_step`."
msgstr ""
-#: keras.engine.training.Model.make_predict_function:10 of
+#: keras.src.engine.training.Model.make_predict_function:10 of
msgid ""
"This function is cached the first time `Model.predict` or "
"`Model.predict_on_batch` is called. The cache is cleared whenever "
@@ -5994,36 +6379,36 @@ msgid ""
"function with `force=True`."
msgstr ""
-#: keras.engine.training.Model.make_predict_function:15 of
+#: keras.src.engine.training.Model.make_predict_function:15 of
msgid ""
"Whether to regenerate the predict function and skip the cached function "
"if available."
msgstr ""
-#: keras.engine.training.Model.make_predict_function:18 of
+#: keras.src.engine.training.Model.make_predict_function:18 of
msgid ""
"Function. The function created by this method should accept a "
"`tf.data.Iterator`, and return the outputs of the `Model`."
msgstr ""
-#: keras.engine.training.Model.make_test_function:1 of
+#: keras.src.engine.training.Model.make_test_function:1 of
msgid "Creates a function that executes one step of evaluation."
msgstr ""
-#: keras.engine.training.Model.make_test_function:3 of
+#: keras.src.engine.training.Model.make_test_function:3 of
msgid ""
"This method can be overridden to support custom evaluation logic. This "
"method is called by `Model.evaluate` and `Model.test_on_batch`."
msgstr ""
-#: keras.engine.training.Model.make_test_function:6 of
+#: keras.src.engine.training.Model.make_test_function:6 of
msgid ""
"Typically, this method directly controls `tf.function` and "
"`tf.distribute.Strategy` settings, and delegates the actual evaluation "
"logic to `Model.test_step`."
msgstr ""
-#: keras.engine.training.Model.make_test_function:10 of
+#: keras.src.engine.training.Model.make_test_function:10 of
msgid ""
"This function is cached the first time `Model.evaluate` or "
"`Model.test_on_batch` is called. The cache is cleared whenever "
@@ -6031,37 +6416,37 @@ msgid ""
"function with `force=True`."
msgstr ""
-#: keras.engine.training.Model.make_test_function:15 of
+#: keras.src.engine.training.Model.make_test_function:15 of
msgid ""
"Whether to regenerate the test function and skip the cached function if "
"available."
msgstr ""
-#: keras.engine.training.Model.make_test_function:18 of
+#: keras.src.engine.training.Model.make_test_function:18 of
msgid ""
"Function. The function created by this method should accept a "
"`tf.data.Iterator`, and return a `dict` containing values that will be "
"passed to `tf.keras.Callbacks.on_test_batch_end`."
msgstr ""
-#: keras.engine.training.Model.make_train_function:1 of
+#: keras.src.engine.training.Model.make_train_function:1 of
msgid "Creates a function that executes one step of training."
msgstr ""
-#: keras.engine.training.Model.make_train_function:3 of
+#: keras.src.engine.training.Model.make_train_function:3 of
msgid ""
"This method can be overridden to support custom training logic. This "
"method is called by `Model.fit` and `Model.train_on_batch`."
msgstr ""
-#: keras.engine.training.Model.make_train_function:6 of
+#: keras.src.engine.training.Model.make_train_function:6 of
msgid ""
"Typically, this method directly controls `tf.function` and "
"`tf.distribute.Strategy` settings, and delegates the actual training "
"logic to `Model.train_step`."
msgstr ""
-#: keras.engine.training.Model.make_train_function:10 of
+#: keras.src.engine.training.Model.make_train_function:10 of
msgid ""
"This function is cached the first time `Model.fit` or "
"`Model.train_on_batch` is called. The cache is cleared whenever "
@@ -6069,13 +6454,13 @@ msgid ""
"function with `force=True`."
msgstr ""
-#: keras.engine.training.Model.make_train_function:15 of
+#: keras.src.engine.training.Model.make_train_function:15 of
msgid ""
"Whether to regenerate the train function and skip the cached function if "
"available."
msgstr ""
-#: keras.engine.training.Model.make_train_function:18 of
+#: keras.src.engine.training.Model.make_train_function:18 of
msgid ""
"Function. The function created by this method should accept a "
"`tf.data.Iterator`, and return a `dict` containing values that will be "
@@ -6086,7 +6471,7 @@ msgstr ""
#: of tensorcircuit.applications.van.MADE.metrics:1
#: tensorcircuit.applications.van.NMF.metrics:1
#: tensorcircuit.applications.van.PixelCNN.metrics:1
-msgid "Returns the model's metrics added using `compile()`, `add_metric()` APIs."
+msgid "Return metrics added using `compile()` or `add_metric()`."
msgstr ""
#: of tensorcircuit.applications.van.MADE.metrics:3
@@ -6235,6 +6620,18 @@ msgstr ""
msgid "A list of non-trainable variables."
msgstr ""
+#: of tensorcircuit.applications.van.MADE.outbound_nodes:1
+#: tensorcircuit.applications.van.MaskedConv2D.outbound_nodes:1
+#: tensorcircuit.applications.van.MaskedLinear.outbound_nodes:1
+#: tensorcircuit.applications.van.NMF.outbound_nodes:1
+#: tensorcircuit.applications.van.PixelCNN.outbound_nodes:1
+#: tensorcircuit.applications.van.ResidualBlock.outbound_nodes:1
+#: tensorcircuit.applications.vqes.Linear.outbound_nodes:1
+#: tensorcircuit.keras.HardwareLayer.outbound_nodes:1
+#: tensorcircuit.keras.QuantumLayer.outbound_nodes:1
+msgid "Return Functional API nodes downstream of this layer."
+msgstr ""
+
#: of tensorcircuit.applications.van.MADE.output:1
#: tensorcircuit.applications.van.MaskedConv2D.output:1
#: tensorcircuit.applications.van.MaskedLinear.output:1
@@ -6361,22 +6758,45 @@ msgstr ""
msgid "if the layer has no defined output shape."
msgstr ""
-#: keras.engine.training.Model.predict:1 of
+#: keras.src.engine.training.Model.predict:1 of
msgid "Generates output predictions for the input samples."
msgstr ""
-#: keras.engine.training.Model.predict:3 of
+#: keras.src.engine.training.Model.predict:3 of
+msgid ""
+"Computation is done in batches. This method is designed for batch "
+"processing of large numbers of inputs. It is not intended for use inside "
+"of loops that iterate over your data and process small numbers of inputs "
+"at a time."
+msgstr ""
+
+#: keras.src.engine.training.Model.predict:8 of
+msgid ""
+"For small numbers of inputs that fit in one batch, directly use "
+"`__call__()` for faster execution, e.g., `model(x)`, or `model(x, "
+"training=False)` if you have layers such as "
+"`tf.keras.layers.BatchNormalization` that behave differently during "
+"inference. You may pair the individual model call with a `tf.function` "
+"for additional performance inside your inner loop. If you need access to "
+"numpy array values instead of tensors after your model call, you can use "
+"`tensor.numpy()` to get the numpy array value of an eager tensor."
+msgstr ""
+
+#: keras.src.engine.training.Model.predict:18 of
+msgid ""
+"Also, note the fact that test loss is not affected by regularization "
+"layers like noise and dropout."
+msgstr ""
+
+#: keras.src.engine.training.Model.predict:21 of
msgid ""
-"Computation is done in batches. This method is designed for performance "
-"in large scale inputs. For small amount of inputs that fit in one batch, "
-"directly using `__call__()` is recommended for faster execution, e.g., "
-"`model(x)`, or `model(x, training=False)` if you have layers such as "
-"`tf.keras.layers.BatchNormalization` that behaves differently during "
-"inference. Also, note the fact that test loss is not affected by "
-"regularization layers like noise and dropout."
+"Note: See [this FAQ entry]( https://keras.io/getting_started/faq/#whats-"
+"the-difference-between-model-methods-predict-and-call) for more details "
+"about the difference between `Model` methods `predict()` and "
+"`__call__()`."
msgstr ""
-#: keras.engine.training.Model.predict:11 of
+#: keras.src.engine.training.Model.predict:26 of
msgid ""
"Input samples. It could be: - A Numpy array (or array-like), or a list of"
" arrays (in case the model has multiple inputs). - A TensorFlow tensor,"
@@ -6387,21 +6807,21 @@ msgid ""
"iterator-like inputs` section of `Model.fit`."
msgstr ""
-#: keras.engine.training.Model.predict:11 of
+#: keras.src.engine.training.Model.predict:26 of
msgid ""
"Input samples. It could be: - A Numpy array (or array-like), or a list of"
" arrays"
msgstr ""
-#: keras.engine.training.Model.predict:16 of
+#: keras.src.engine.training.Model.predict:31 of
msgid "A `tf.data` dataset."
msgstr ""
-#: keras.engine.training.Model.predict:17 of
+#: keras.src.engine.training.Model.predict:32 of
msgid "A generator or `keras.utils.Sequence` instance."
msgstr ""
-#: keras.engine.training.Model.predict:21 of
+#: keras.src.engine.training.Model.predict:36 of
msgid ""
"Integer or `None`. Number of samples per batch. If unspecified, "
"`batch_size` will default to 32. Do not specify the `batch_size` if your "
@@ -6409,11 +6829,7 @@ msgid ""
"instances (since they generate batches)."
msgstr ""
-#: keras.engine.training.Model.predict:27 of
-msgid "Verbosity mode, 0 or 1."
-msgstr ""
-
-#: keras.engine.training.Model.predict:28 of
+#: keras.src.engine.training.Model.predict:49 of
msgid ""
"Total number of steps (batches of samples) before declaring the "
"prediction round finished. Ignored with the default value of `None`. If x"
@@ -6421,13 +6837,14 @@ msgid ""
"the input dataset is exhausted."
msgstr ""
-#: keras.engine.training.Model.predict:33 of
+#: keras.src.engine.training.Model.predict:54 of
msgid ""
"List of `keras.callbacks.Callback` instances. List of callbacks to apply "
-"during prediction. See [callbacks](/api_docs/python/tf/keras/callbacks)."
+"during prediction. See [callbacks]( "
+"https://www.tensorflow.org/api_docs/python/tf/keras/callbacks)."
msgstr ""
-#: keras.engine.training.Model.predict:50 of
+#: keras.src.engine.training.Model.predict:74 of
msgid ""
"See the discussion of `Unpacking behavior for iterator-like inputs` for "
"`Model.fit`. Note that Model.predict uses the same interpretation rules "
@@ -6435,105 +6852,105 @@ msgid ""
"all three methods."
msgstr ""
-#: keras.engine.training.Model.predict:55
-#: keras.engine.training.Model.predict_on_batch:9 of
+#: keras.src.engine.training.Model.predict:79
+#: keras.src.engine.training.Model.predict_on_batch:9 of
msgid "Numpy array(s) of predictions."
msgstr ""
-#: keras.engine.training.Model.predict:57 of
+#: keras.src.engine.training.Model.predict:81 of
msgid "If `model.predict` is wrapped in a `tf.function`."
msgstr ""
-#: keras.engine.training.Model.predict:58 of
+#: keras.src.engine.training.Model.predict:82 of
msgid ""
"In case of mismatch between the provided input data and the model's "
"expectations, or in case a stateful model receives a number of "
"samples that is not a multiple of the batch size."
msgstr ""
-#: keras.engine.training.Model.predict_generator:1 of
+#: keras.src.engine.training.Model.predict_generator:1 of
msgid "Generates predictions for the input samples from a data generator."
msgstr ""
-#: keras.engine.training.Model.predict_generator:4 of
+#: keras.src.engine.training.Model.predict_generator:4 of
msgid ""
"`Model.predict` now supports generators, so there is no longer any need "
"to use this endpoint."
msgstr ""
-#: keras.engine.training.Model.predict_on_batch:1 of
+#: keras.src.engine.training.Model.predict_on_batch:1 of
msgid "Returns predictions for a single batch of samples."
msgstr ""
-#: keras.engine.training.Model.predict_on_batch:3 of
+#: keras.src.engine.training.Model.predict_on_batch:3 of
msgid ""
"Input data. It could be: - A Numpy array (or array-like), or a list of "
"arrays (in case the model has multiple inputs). - A TensorFlow "
"tensor, or a list of tensors (in case the model has multiple inputs)."
msgstr ""
-#: keras.engine.training.Model.predict_on_batch:3
-#: keras.engine.training.Model.test_on_batch:3 of
+#: keras.src.engine.training.Model.predict_on_batch:3
+#: keras.src.engine.training.Model.test_on_batch:3 of
msgid ""
"Input data. It could be: - A Numpy array (or array-like), or a list of "
"arrays (in case the"
msgstr ""
-#: keras.engine.training.Model.predict_on_batch:5
-#: keras.engine.training.Model.test_on_batch:5 of
+#: keras.src.engine.training.Model.predict_on_batch:5
+#: keras.src.engine.training.Model.test_on_batch:5 of
msgid "model has multiple inputs)."
msgstr ""
-#: keras.engine.training.Model.predict_on_batch:7
-#: keras.engine.training.Model.test_on_batch:6 of
+#: keras.src.engine.training.Model.predict_on_batch:7
+#: keras.src.engine.training.Model.test_on_batch:6 of
msgid "A TensorFlow tensor, or a list of tensors (in case the model has"
msgstr ""
-#: keras.engine.training.Model.predict_on_batch:7
-#: keras.engine.training.Model.test_on_batch:7 of
+#: keras.src.engine.training.Model.predict_on_batch:7
+#: keras.src.engine.training.Model.test_on_batch:7 of
msgid "multiple inputs)."
msgstr ""
-#: keras.engine.training.Model.predict_on_batch:11 of
-msgid "If `model.predict_on_batch` is wrapped in a `tf.function`."
+#: keras.src.engine.training.Model.predict_on_batch:11 of
+msgid "If `model.predict_on_batch` is wrapped in a `tf.function`."
msgstr ""
-#: keras.engine.training.Model.predict_step:1 of
+#: keras.src.engine.training.Model.predict_step:1 of
msgid "The logic for one inference step."
msgstr ""
-#: keras.engine.training.Model.predict_step:3 of
+#: keras.src.engine.training.Model.predict_step:3 of
msgid ""
"This method can be overridden to support custom inference logic. This "
"method is called by `Model.make_predict_function`."
msgstr ""
-#: keras.engine.training.Model.predict_step:6 of
+#: keras.src.engine.training.Model.predict_step:6 of
msgid ""
"This method should contain the mathematical logic for one step of "
-"inference. This typically includes the forward pass."
+"inference. This typically includes the forward pass."
msgstr ""
-#: keras.engine.training.Model.predict_step:9 of
+#: keras.src.engine.training.Model.predict_step:9 of
msgid ""
"Configuration details for *how* this logic is run (e.g. `tf.function` and"
" `tf.distribute.Strategy` settings), should be left to "
"`Model.make_predict_function`, which can also be overridden."
msgstr ""
-#: keras.engine.training.Model.predict_step:13
-#: keras.engine.training.Model.test_step:15
-#: keras.engine.training.Model.train_step:16 of
+#: keras.src.engine.training.Model.predict_step:13
+#: keras.src.engine.training.Model.test_step:15
+#: keras.src.engine.training.Model.train_step:17 of
msgid "A nested structure of `Tensor`s."
msgstr ""
-#: keras.engine.training.Model.predict_step:15 of
+#: keras.src.engine.training.Model.predict_step:15 of
msgid ""
"The result of one inference step, typically the output of calling the "
"`Model` on data."
msgstr ""
-#: keras.engine.training.Model.reset_metrics:1 of
+#: keras.src.engine.training.Model.reset_metrics:1 of
msgid "Resets the state of all the metrics in the model."
msgstr ""
@@ -6566,124 +6983,170 @@ msgstr ""
msgid "Boolean, whether the model should run eagerly."
msgstr ""
-#: keras.engine.training.Model.save:1 of
-msgid "Saves the model to Tensorflow SavedModel or a single HDF5 file."
+#: keras.src.engine.training.Model.save:1 of
+msgid "Saves a model as a TensorFlow SavedModel or HDF5 file."
msgstr ""
-#: keras.engine.training.Model.save:3 of
-msgid ""
-"Please see `tf.keras.models.save_model` or the [Serialization and Saving "
-"guide](https://keras.io/guides/serialization_and_saving/) for details."
+#: keras.src.engine.training.Model.save:4 of
+msgid "See the [Serialization and Saving guide]("
msgstr ""
-#: keras.engine.training.Model.save:7 of
-msgid "String, PathLike, path to SavedModel or H5 file to save the model."
+#: keras.src.engine.training.Model.save:4 of
+msgid "https://keras.io/guides/serialization_and_saving/) for details."
msgstr ""
-#: keras.engine.training.Model.save:9
-#: keras.engine.training.Model.save_weights:47 of
-msgid ""
-"Whether to silently overwrite any existing file at the target location, "
-"or provide the user with a manual prompt."
+#: keras.src.engine.training.Model.save:6 of
+msgid "Keras model instance to be saved."
+msgstr ""
+
+#: keras.src.engine.training.Model.save:7 of
+msgid "`str` or `pathlib.Path` object. Path where to save the model."
msgstr ""
-#: keras.engine.training.Model.save:11 of
-msgid "If True, save optimizer's state together."
+#: keras.src.engine.training.Model.save:9 of
+msgid ""
+"Whether we should overwrite any existing model at the target location, or"
+" instead ask the user via an interactive prompt."
msgstr ""
-#: keras.engine.training.Model.save:12 of
+#: keras.src.engine.training.Model.save:12 of
msgid ""
-"Either `'tf'` or `'h5'`, indicating whether to save the model to "
-"Tensorflow SavedModel or HDF5. Defaults to 'tf' in TF 2.X, and 'h5' in TF"
-" 1.X."
+"Either `\"keras\"`, `\"tf\"`, `\"h5\"`, indicating whether to save the "
+"model in the native Keras format (`.keras`), in the TensorFlow SavedModel"
+" format (referred to as \"SavedModel\" below), or in the legacy HDF5 "
+"format (`.h5`). Defaults to `\"tf\"` in TF 2.X, and `\"h5\"` in TF 1.X."
+msgstr ""
+
+#: keras.src.engine.training.Model.save:36 of
+msgid "SavedModel format arguments:"
+msgstr ""
+
+#: keras.src.engine.training.Model.save:22 of
+msgid "include_optimizer: Only applied to SavedModel and legacy HDF5"
+msgstr ""
+
+#: keras.src.engine.training.Model.save:22 of
+msgid "formats. If False, do not save the optimizer state. Defaults to `True`."
msgstr ""
-#: keras.engine.training.Model.save:15 of
+#: keras.src.engine.training.Model.save:25 of
+msgid "signatures: Only applies to SavedModel format. Signatures to save"
+msgstr ""
+
+#: keras.src.engine.training.Model.save:25 of
msgid ""
-"Signatures to save with the SavedModel. Applicable to the 'tf' format "
-"only. Please see the `signatures` argument in `tf.saved_model.save` for "
-"details."
+"with the SavedModel. See the `signatures` argument in "
+"`tf.saved_model.save` for details."
+msgstr ""
+
+#: keras.src.engine.training.Model.save:28 of
+msgid "options: Only applies to SavedModel format."
msgstr ""
-#: keras.engine.training.Model.save:18 of
+#: keras.src.engine.training.Model.save:28 of
msgid ""
-"(only applies to SavedModel format) `tf.saved_model.SaveOptions` object "
-"that specifies options for saving to SavedModel."
+"`tf.saved_model.SaveOptions` object that specifies SavedModel saving "
+"options."
msgstr ""
-#: keras.engine.training.Model.save:21 of
+#: keras.src.engine.training.Model.save:36 of
+msgid "save_traces: Only applies to SavedModel format. When enabled, the"
+msgstr ""
+
+#: keras.src.engine.training.Model.save:31 of
msgid ""
-"(only applies to SavedModel format) When enabled, the SavedModel will "
-"store the function traces for each layer. This can be disabled, so that "
-"only the configs of each layer are stored. Defaults to `True`. Disabling "
-"this will decrease serialization time and reduce file size, but it "
-"requires that all custom layers/models implement a `get_config()` method."
+"SavedModel will store the function traces for each layer. This can be "
+"disabled, so that only the configs of each layer are stored. Defaults to "
+"`True`. Disabling this will decrease serialization time and reduce file "
+"size, but it requires that all custom layers/models implement a "
+"`get_config()` method."
+msgstr ""
+
+#: keras.src.engine.training.Model.save:40 of
+msgid "```python model = tf.keras.Sequential(["
msgstr ""
-#: keras.engine.training.Model.save:30 of
-msgid "```python from keras.models import load_model"
+#: keras.src.engine.training.Model.save:42 of
+msgid "tf.keras.layers.Dense(5, input_shape=(3,)), tf.keras.layers.Softmax()])"
msgstr ""
-#: keras.engine.training.Model.save:33 of
+#: keras.src.engine.training.Model.save:44 of
msgid ""
-"model.save('my_model.h5') # creates a HDF5 file 'my_model.h5' del model"
-" # deletes the existing model"
+"model.save(\"model.keras\") loaded_model = "
+"tf.keras.models.load_model(\"model.keras\") x = tf.random.uniform((10, "
+"3)) assert np.allclose(model.predict(x), loaded_model.predict(x)) ```"
msgstr ""
-#: keras.engine.training.Model.save:36 of
+#: keras.src.engine.training.Model.save:50 of
+msgid "Note that `model.save()` is an alias for `tf.keras.models.save_model()`."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.save_own_variables:1 of
+msgid "Saves the state of the layer."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.save_own_variables:3 of
msgid ""
-"# returns a compiled model # identical to the previous one model = "
-"load_model('my_model.h5') ```"
+"You can override this method to take full control of how the state of the"
+" layer is saved upon calling `model.save()`."
msgstr ""
-#: keras.engine.training.Model.save_spec:1 of
-msgid "Returns the `tf.TensorSpec` of call inputs as a tuple `(args, kwargs)`."
+#: keras.src.engine.base_layer.Layer.save_own_variables:6 of
+msgid "Dict where the state of the model will be saved."
msgstr ""
-#: keras.engine.training.Model.save_spec:3 of
+#: keras.src.engine.training.Model.save_spec:1 of
+msgid "Returns the `tf.TensorSpec` of call args as a tuple `(args, kwargs)`."
+msgstr ""
+
+#: keras.src.engine.training.Model.save_spec:3 of
msgid ""
"This value is automatically defined after calling the model for the first"
" time. Afterwards, you can use it when exporting the model for serving:"
msgstr ""
-#: keras.engine.training.Model.save_spec:6 of
+#: keras.src.engine.training.Model.save_spec:7 of
msgid "```python model = tf.keras.Model(...)"
msgstr ""
-#: keras.engine.training.Model.save_spec:9 of
+#: keras.src.engine.training.Model.save_spec:10 of
msgid "@tf.function def serve(*args, **kwargs):"
msgstr ""
-#: keras.engine.training.Model.save_spec:11 of
+#: keras.src.engine.training.Model.save_spec:12 of
msgid ""
"outputs = model(*args, **kwargs) # Apply postprocessing steps, or add "
"additional outputs. ... return outputs"
msgstr ""
-#: keras.engine.training.Model.save_spec:16 of
+#: keras.src.engine.training.Model.save_spec:17 of
msgid ""
-"# arg_specs is `[tf.TensorSpec(...), ...]`. kwarg_specs, in this example,"
-" is # an empty dict since functional models do not use keyword arguments."
-" arg_specs, kwarg_specs = model.save_spec()"
+"# arg_specs is `[tf.TensorSpec(...), ...]`. kwarg_specs, in this # "
+"example, is an empty dict since functional models do not use keyword # "
+"arguments. arg_specs, kwarg_specs = model.save_spec()"
msgstr ""
-#: keras.engine.training.Model.save_spec:20 of
+#: keras.src.engine.training.Model.save_spec:23 of
msgid "model.save(path, signatures={"
msgstr ""
-#: keras.engine.training.Model.save_spec:21 of
-msgid "'serving_default': serve.get_concrete_function(*arg_specs, **kwarg_specs)"
+#: keras.src.engine.training.Model.save_spec:23 of
+msgid "'serving_default': serve.get_concrete_function(*arg_specs,"
+msgstr ""
+
+#: keras.src.engine.training.Model.save_spec:24 of
+msgid "**kwarg_specs)"
msgstr ""
-#: keras.engine.training.Model.save_spec:23 of
+#: keras.src.engine.training.Model.save_spec:26 of
msgid "})"
msgstr ""
-#: keras.engine.training.Model.save_spec of
+#: keras.src.engine.training.Model.save_spec of
msgid "param dynamic_batch"
msgstr ""
-#: keras.engine.training.Model.save_spec:25 of
+#: keras.src.engine.training.Model.save_spec:28 of
msgid ""
"Whether to set the batch sizes of all the returned `tf.TensorSpec` to "
"`None`. (Note that when defining functional or Sequential models with "
@@ -6691,11 +7154,11 @@ msgid ""
"preserved). Defaults to `True`."
msgstr ""
-#: keras.engine.training.Model.save_spec of
+#: keras.src.engine.training.Model.save_spec of
msgid "returns"
msgstr ""
-#: keras.engine.training.Model.save_spec:30 of
+#: keras.src.engine.training.Model.save_spec:33 of
msgid ""
"If the model inputs are defined, returns a tuple `(args, kwargs)`. All "
"elements in `args` and `kwargs` are `tf.TensorSpec`. If the model inputs "
@@ -6703,49 +7166,49 @@ msgid ""
"when calling the model, `model.fit`, `model.evaluate` or `model.predict`."
msgstr ""
-#: keras.engine.training.Model.save_weights:1 of
+#: keras.src.engine.training.Model.save_weights:1 of
msgid "Saves all layer weights."
msgstr ""
-#: keras.engine.training.Model.save_weights:3 of
+#: keras.src.engine.training.Model.save_weights:3 of
msgid ""
"Either saves in HDF5 or in TensorFlow format based on the `save_format` "
"argument."
msgstr ""
-#: keras.engine.training.Model.save_weights:14 of
+#: keras.src.engine.training.Model.save_weights:14 of
msgid "When saving in HDF5 format, the weight file has:"
msgstr ""
-#: keras.engine.training.Model.save_weights:7 of
+#: keras.src.engine.training.Model.save_weights:7 of
msgid "`layer_names` (attribute), a list of strings"
msgstr ""
-#: keras.engine.training.Model.save_weights:8 of
+#: keras.src.engine.training.Model.save_weights:8 of
msgid "(ordered names of model layers)."
msgstr ""
-#: keras.engine.training.Model.save_weights:14 of
+#: keras.src.engine.training.Model.save_weights:14 of
msgid "For every layer, a `group` named `layer.name`"
msgstr ""
-#: keras.engine.training.Model.save_weights:11 of
+#: keras.src.engine.training.Model.save_weights:11 of
msgid "For every such layer group, a group attribute `weight_names`,"
msgstr ""
-#: keras.engine.training.Model.save_weights:11 of
+#: keras.src.engine.training.Model.save_weights:11 of
msgid "a list of strings (ordered names of weights tensor of the layer)."
msgstr ""
-#: keras.engine.training.Model.save_weights:14 of
+#: keras.src.engine.training.Model.save_weights:14 of
msgid "For every weight in the layer, a dataset"
msgstr ""
-#: keras.engine.training.Model.save_weights:14 of
+#: keras.src.engine.training.Model.save_weights:14 of
msgid "storing the weight value, named after the weight tensor."
msgstr ""
-#: keras.engine.training.Model.save_weights:16 of
+#: keras.src.engine.training.Model.save_weights:16 of
msgid ""
"When saving in TensorFlow format, all objects referenced by the network "
"are saved in the same format as `tf.train.Checkpoint`, including any "
@@ -6758,7 +7221,7 @@ msgid ""
"`tf.train.Checkpoint` and `tf.keras.Model` for details."
msgstr ""
-#: keras.engine.training.Model.save_weights:26 of
+#: keras.src.engine.training.Model.save_weights:26 of
msgid ""
"While the formats are the same, do not mix `save_weights` and "
"`tf.train.Checkpoint`. Checkpoints saved by `Model.save_weights` should "
@@ -6768,7 +7231,7 @@ msgid ""
"`save_weights` for training checkpoints."
msgstr ""
-#: keras.engine.training.Model.save_weights:33 of
+#: keras.src.engine.training.Model.save_weights:33 of
msgid ""
"The TensorFlow format matches objects and variables by starting at a root"
" object, `self` for `save_weights`, and greedily matching attribute "
@@ -6777,11 +7240,11 @@ msgid ""
"This means saving a `tf.keras.Model` using `save_weights` and loading "
"into a `tf.train.Checkpoint` with a `Model` attached (or vice versa) will"
" not match the `Model`'s variables. See the [guide to training "
-"checkpoints](https://www.tensorflow.org/guide/checkpoint) for details on "
-"the TensorFlow format."
+"checkpoints]( https://www.tensorflow.org/guide/checkpoint) for details on"
+" the TensorFlow format."
msgstr ""
-#: keras.engine.training.Model.save_weights:43 of
+#: keras.src.engine.training.Model.save_weights:44 of
msgid ""
"String or PathLike, path to the file to save the weights to. When saving "
"in TensorFlow format, this is the prefix used for checkpoint files "
@@ -6789,28 +7252,34 @@ msgid ""
" to be saved in HDF5 format."
msgstr ""
-#: keras.engine.training.Model.save_weights:49 of
+#: keras.src.engine.training.Model.save_weights:48 of
+msgid ""
+"Whether to silently overwrite any existing file at the target location, "
+"or provide the user with a manual prompt."
+msgstr ""
+
+#: keras.src.engine.training.Model.save_weights:50 of
msgid ""
"Either 'tf' or 'h5'. A `filepath` ending in '.h5' or '.keras' will "
-"default to HDF5 if `save_format` is `None`. Otherwise `None` defaults to "
-"'tf'."
+"default to HDF5 if `save_format` is `None`. Otherwise, `None` becomes "
+"'tf'. Defaults to `None`."
msgstr ""
-#: keras.engine.training.Model.save_weights:52 of
+#: keras.src.engine.training.Model.save_weights:53 of
msgid ""
"Optional `tf.train.CheckpointOptions` object that specifies options for "
"saving weights."
msgstr ""
-#: keras.engine.training.Model.save_weights:55 of
-msgid "If `h5py` is not available when attempting to save in HDF5 format."
+#: keras.src.engine.training.Model.save_weights:56 of
+msgid "If `h5py` is not available when attempting to save in HDF5 format."
msgstr ""
-#: keras.engine.base_layer.Layer.set_weights:1 of
+#: keras.src.engine.base_layer.Layer.set_weights:1 of
msgid "Sets the weights of the layer, from NumPy arrays."
msgstr ""
-#: keras.engine.base_layer.Layer.set_weights:3 of
+#: keras.src.engine.base_layer.Layer.set_weights:3 of
msgid ""
"The weights of a layer represent the state of the layer. This function "
"sets the weight values from numpy arrays. The weight values should be "
@@ -6819,27 +7288,33 @@ msgid ""
" layer."
msgstr ""
-#: keras.engine.base_layer.Layer.get_weights:8
-#: keras.engine.base_layer.Layer.set_weights:9 of
+#: keras.src.engine.base_layer.Layer.get_weights:8
+#: keras.src.engine.base_layer.Layer.set_weights:9 of
msgid ""
"For example, a `Dense` layer returns a list of two values: the kernel "
"matrix and the bias vector. These can be used to set the weights of "
"another `Dense` layer:"
msgstr ""
-#: keras.engine.base_layer.Layer.set_weights:33 of
+#: keras.src.engine.base_layer.Layer.set_weights:33 of
msgid ""
"a list of NumPy arrays. The number of arrays and their shape must match "
"number of the dimensions of the weights of the layer (i.e. it should "
"match the output of `get_weights`)."
msgstr ""
-#: keras.engine.base_layer.Layer.set_weights:39 of
+#: keras.src.engine.base_layer.Layer.set_weights:39 of
msgid ""
"If the provided weights list does not match the layer's "
"specifications."
msgstr ""
+#: of tensorcircuit.applications.van.MADE.state_updates:1
+#: tensorcircuit.applications.van.NMF.state_updates:1
+#: tensorcircuit.applications.van.PixelCNN.state_updates:1
+msgid "Deprecated, do NOT use!"
+msgstr ""
+
#: of tensorcircuit.applications.van.MADE.state_updates:3
#: tensorcircuit.applications.van.NMF.state_updates:3
#: tensorcircuit.applications.van.PixelCNN.state_updates:3
@@ -6898,34 +7373,50 @@ msgstr ""
msgid "A sequence of all submodules."
msgstr ""
-#: keras.engine.training.Model.summary:1 of
+#: keras.src.engine.training.Model.summary:1 of
msgid "Prints a string summary of the network."
msgstr ""
-#: keras.engine.training.Model.summary:3 of
+#: keras.src.engine.training.Model.summary:3 of
msgid ""
"Total length of printed lines (e.g. set this to adapt the display to "
"different terminal window sizes)."
msgstr ""
-#: keras.engine.training.Model.summary:6 of
+#: keras.src.engine.training.Model.summary:6 of
msgid ""
"Relative or absolute positions of log elements in each line. If not "
-"provided, defaults to `[.33, .55, .67, 1.]`."
+"provided, becomes `[0.3, 0.6, 0.70, 1.]`. Defaults to `None`."
msgstr ""
-#: keras.engine.training.Model.summary:9 of
+#: keras.src.engine.training.Model.summary:9 of
msgid ""
-"Print function to use. Defaults to `print`. It will be called on each "
-"line of the summary. You can set it to a custom function in order to "
-"capture the string summary."
+"Print function to use. By default, prints to `stdout`. If `stdout` "
+"doesn't work in your environment, change to `print`. It will be called on"
+" each line of the summary. You can set it to a custom function in order "
+"to capture the string summary."
+msgstr ""
+
+#: keras.src.engine.training.Model.summary:14 of
+msgid "Whether to expand the nested models. Defaults to `False`."
msgstr ""
-#: keras.engine.training.Model.summary:13 of
-msgid "Whether to expand the nested models. If not provided, defaults to `False`."
+#: keras.src.engine.training.Model.summary:16 of
+msgid "Whether to show if a layer is trainable. Defaults to `False`."
msgstr ""
-#: keras.engine.training.Model.summary:16 of
+#: keras.src.engine.training.Model.summary:18 of
+msgid ""
+"a list or tuple of 2 strings, which is the starting layer name and ending"
+" layer name (both inclusive) indicating the range of layers to be printed"
+" in summary. It also accepts regex patterns instead of exact name. In "
+"such case, start predicate will be the first element it matches to "
+"`layer_range[0]` and the end predicate will be the last element it "
+"matches to `layer_range[1]`. By default `None` which considers all layers"
+" of model."
+msgstr ""
+
+#: keras.src.engine.training.Model.summary:27 of
msgid "if `summary()` is called before the model is built."
msgstr ""
@@ -6941,37 +7432,38 @@ msgstr ""
msgid "Whether this layer supports computing a mask using `compute_mask`."
msgstr ""
-#: keras.engine.training.Model.test_on_batch:1 of
+#: keras.src.engine.training.Model.test_on_batch:1 of
msgid "Test the model on a single batch of samples."
msgstr ""
-#: keras.engine.training.Model.test_on_batch:3 of
+#: keras.src.engine.training.Model.test_on_batch:3 of
msgid ""
"Input data. It could be: - A Numpy array (or array-like), or a list of "
"arrays (in case the model has multiple inputs). - A TensorFlow "
"tensor, or a list of tensors (in case the model has multiple inputs)."
-" - A dict mapping input names to the corresponding array/tensors, if "
+" - A dict mapping input names to the corresponding array/tensors, if "
"the model has named inputs."
msgstr ""
-#: keras.engine.training.Model.test_on_batch:8 of
-msgid "A dict mapping input names to the corresponding array/tensors, if"
+#: keras.src.engine.training.Model.test_on_batch:8
+#: keras.src.engine.training.Model.train_on_batch:8 of
+msgid "A dict mapping input names to the corresponding array/tensors,"
msgstr ""
-#: keras.engine.training.Model.test_on_batch:9 of
-msgid "the model has named inputs."
+#: keras.src.engine.training.Model.test_on_batch:9
+#: keras.src.engine.training.Model.train_on_batch:9 of
+msgid "if the model has named inputs."
msgstr ""
-#: keras.engine.training.Model.test_on_batch:10
-#: keras.engine.training.Model.train_on_batch:10 of
+#: keras.src.engine.training.Model.test_on_batch:10 of
msgid ""
"Target data. Like the input data `x`, it could be either Numpy array(s) "
"or TensorFlow tensor(s). It should be consistent with `x` (you cannot "
"have Numpy inputs and tensor targets, or inversely)."
msgstr ""
-#: keras.engine.training.Model.test_on_batch:13
-#: keras.engine.training.Model.train_on_batch:13 of
+#: keras.src.engine.training.Model.test_on_batch:13
+#: keras.src.engine.training.Model.train_on_batch:12 of
msgid ""
"Optional array of the same length as x, containing weights to apply to "
"the model's loss for each sample. In the case of temporal data, you can "
@@ -6979,105 +7471,105 @@ msgid ""
"different weight to every timestep of every sample."
msgstr ""
-#: keras.engine.training.Model.test_on_batch:18
-#: keras.engine.training.Model.train_on_batch:22 of
+#: keras.src.engine.training.Model.test_on_batch:18
+#: keras.src.engine.training.Model.train_on_batch:24 of
msgid ""
"If `True`, the metrics returned will be only for this batch. If `False`, "
"the metrics will be statefully accumulated across batches."
msgstr ""
-#: keras.engine.training.Model.test_on_batch:30 of
-msgid "If `model.test_on_batch` is wrapped in a `tf.function`."
+#: keras.src.engine.training.Model.test_on_batch:30 of
+msgid "If `model.test_on_batch` is wrapped in a `tf.function`."
msgstr ""
-#: keras.engine.training.Model.test_step:1 of
+#: keras.src.engine.training.Model.test_step:1 of
msgid "The logic for one evaluation step."
msgstr ""
-#: keras.engine.training.Model.test_step:3 of
+#: keras.src.engine.training.Model.test_step:3 of
msgid ""
"This method can be overridden to support custom evaluation logic. This "
"method is called by `Model.make_test_function`."
msgstr ""
-#: keras.engine.training.Model.test_step:6 of
+#: keras.src.engine.training.Model.test_step:6 of
msgid ""
"This function should contain the mathematical logic for one step of "
"evaluation. This typically includes the forward pass, loss calculation, "
"and metrics updates."
msgstr ""
-#: keras.engine.training.Model.test_step:11 of
+#: keras.src.engine.training.Model.test_step:11 of
msgid ""
"Configuration details for *how* this logic is run (e.g. `tf.function` and"
" `tf.distribute.Strategy` settings), should be left to "
"`Model.make_test_function`, which can also be overridden."
msgstr ""
-#: keras.engine.training.Model.test_step:17 of
+#: keras.src.engine.training.Model.test_step:17 of
msgid ""
"A `dict` containing values that will be passed to "
"`tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the "
"values of the `Model`'s metrics are returned."
msgstr ""
-#: keras.engine.training.Model.to_json:1 of
+#: keras.src.engine.training.Model.to_json:1 of
msgid "Returns a JSON string containing the network configuration."
msgstr ""
-#: keras.engine.training.Model.to_json:3 of
+#: keras.src.engine.training.Model.to_json:3 of
msgid ""
"To load a network from a JSON save file, use "
"`keras.models.model_from_json(json_string, custom_objects={})`."
msgstr ""
-#: keras.engine.training.Model.to_json:6 of
-msgid "Additional keyword arguments to be passed to `json.dumps()`."
+#: keras.src.engine.training.Model.to_json:6 of
+msgid "Additional keyword arguments to be passed to *`json.dumps()`."
msgstr ""
-#: keras.engine.training.Model.to_json:9 of
+#: keras.src.engine.training.Model.to_json:9 of
msgid "A JSON string."
msgstr ""
-#: keras.engine.training.Model.to_yaml:1 of
+#: keras.src.engine.training.Model.to_yaml:1 of
msgid "Returns a yaml string containing the network configuration."
msgstr ""
-#: keras.engine.training.Model.to_yaml:3 of
+#: keras.src.engine.training.Model.to_yaml:3 of
msgid ""
"Note: Since TF 2.6, this method is no longer supported and will raise a "
"RuntimeError."
msgstr ""
-#: keras.engine.training.Model.to_yaml:6 of
+#: keras.src.engine.training.Model.to_yaml:6 of
msgid ""
"To load a network from a yaml save file, use "
"`keras.models.model_from_yaml(yaml_string, custom_objects={})`."
msgstr ""
-#: keras.engine.training.Model.to_yaml:9 of
+#: keras.src.engine.training.Model.to_yaml:9 of
msgid ""
"`custom_objects` should be a dictionary mapping the names of custom "
"losses / layers / etc to the corresponding functions / classes."
msgstr ""
-#: keras.engine.training.Model.to_yaml:13 of
+#: keras.src.engine.training.Model.to_yaml:13 of
msgid "Additional keyword arguments to be passed to `yaml.dump()`."
msgstr ""
-#: keras.engine.training.Model.to_yaml:16 of
+#: keras.src.engine.training.Model.to_yaml:16 of
msgid "A YAML string."
msgstr ""
-#: keras.engine.training.Model.to_yaml:18 of
+#: keras.src.engine.training.Model.to_yaml:18 of
msgid "announces that the method poses a security risk"
msgstr ""
-#: keras.engine.training.Model.train_on_batch:1 of
+#: keras.src.engine.training.Model.train_on_batch:1 of
msgid "Runs a single gradient update on a single batch of data."
msgstr ""
-#: keras.engine.training.Model.train_on_batch:3 of
+#: keras.src.engine.training.Model.train_on_batch:3 of
msgid ""
"Input data. It could be: - A Numpy array (or array-like), or a list of "
"arrays (in case the model has multiple inputs). - A TensorFlow "
@@ -7086,27 +7578,28 @@ msgid ""
"the model has named inputs."
msgstr ""
-#: keras.engine.training.Model.train_on_batch:6 of
+#: keras.src.engine.training.Model.train_on_batch:6 of
msgid "A TensorFlow tensor, or a list of tensors"
msgstr ""
-#: keras.engine.training.Model.train_on_batch:8 of
-msgid "A dict mapping input names to the corresponding array/tensors,"
-msgstr ""
-
-#: keras.engine.training.Model.train_on_batch:9 of
-msgid "if the model has named inputs."
+#: keras.src.engine.training.Model.train_on_batch:10 of
+msgid ""
+"Target data. Like the input data `x`, it could be either Numpy array(s) "
+"or TensorFlow tensor(s)."
msgstr ""
-#: keras.engine.training.Model.train_on_batch:18 of
+#: keras.src.engine.training.Model.train_on_batch:17 of
msgid ""
"Optional dictionary mapping class indices (integers) to a weight (float) "
"to apply to the model's loss for the samples from this class during "
"training. This can be useful to tell the model to \"pay more attention\" "
-"to samples from an under-represented class."
+"to samples from an under-represented class. When `class_weight` is "
+"specified and targets have a rank of 2 or greater, either `y` must be "
+"one-hot encoded, or an explicit final dimension of `1` must be included "
+"for sparse class labels."
msgstr ""
-#: keras.engine.training.Model.train_on_batch:29 of
+#: keras.src.engine.training.Model.train_on_batch:31 of
msgid ""
"Scalar training loss (if the model has a single output and no metrics) or"
" list of scalars (if the model has multiple outputs and/or metrics). The "
@@ -7114,38 +7607,38 @@ msgid ""
"scalar outputs."
msgstr ""
-#: keras.engine.training.Model.train_on_batch:35 of
+#: keras.src.engine.training.Model.train_on_batch:37 of
msgid "If `model.train_on_batch` is wrapped in a `tf.function`."
msgstr ""
-#: keras.engine.training.Model.train_step:1 of
+#: keras.src.engine.training.Model.train_step:1 of
msgid "The logic for one training step."
msgstr ""
-#: keras.engine.training.Model.train_step:3 of
+#: keras.src.engine.training.Model.train_step:3 of
msgid ""
"This method can be overridden to support custom training logic. For "
"concrete examples of how to override this method see [Customizing what "
-"happends in "
-"fit](https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit)."
-" This method is called by `Model.make_train_function`."
+"happens in fit]( "
+"https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit). "
+"This method is called by `Model.make_train_function`."
msgstr ""
-#: keras.engine.training.Model.train_step:8 of
+#: keras.src.engine.training.Model.train_step:9 of
msgid ""
"This method should contain the mathematical logic for one step of "
-"training. This typically includes the forward pass, loss calculation, "
+"training. This typically includes the forward pass, loss calculation, "
"backpropagation, and metric updates."
msgstr ""
-#: keras.engine.training.Model.train_step:12 of
+#: keras.src.engine.training.Model.train_step:13 of
msgid ""
"Configuration details for *how* this logic is run (e.g. `tf.function` and"
" `tf.distribute.Strategy` settings), should be left to "
"`Model.make_train_function`, which can also be overridden."
msgstr ""
-#: keras.engine.training.Model.train_step:18 of
+#: keras.src.engine.training.Model.train_step:19 of
msgid ""
"A `dict` containing values that will be passed to "
"`tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the "
@@ -7306,33 +7799,36 @@ msgstr ""
#: tensorcircuit.applications.van.MaskedLinear:1
#: tensorcircuit.applications.van.ResidualBlock:1
#: tensorcircuit.applications.vqes.Linear:1 tensorcircuit.keras.QuantumLayer:1
-msgid "Bases: :py:class:`~keras.engine.base_layer.Layer`"
+msgid "Bases: :py:class:`~keras.src.engine.base_layer.Layer`"
msgstr ""
-#: keras.engine.base_layer.Layer.build:1 of
+#: keras.src.engine.base_layer.Layer.build:1 of
#: tensorcircuit.applications.van.MaskedConv2D.build:1
#: tensorcircuit.keras.QuantumLayer.build:1
-msgid "Creates the variables of the layer (optional, for subclass implementers)."
+msgid "Creates the variables of the layer (for subclass implementers)."
msgstr ""
-#: keras.engine.base_layer.Layer.build:3 of
+#: keras.src.engine.base_layer.Layer.build:3 of
#: tensorcircuit.applications.van.MaskedConv2D.build:3
#: tensorcircuit.keras.QuantumLayer.build:3
msgid ""
"This is a method that implementers of subclasses of `Layer` or `Model` "
"can override if they need a state-creation step in-between layer "
-"instantiation and layer call."
+"instantiation and layer call. It is invoked automatically before the "
+"first execution of `call()`."
msgstr ""
-#: keras.engine.base_layer.Layer.build:7 of
-#: tensorcircuit.applications.van.MaskedConv2D.build:7
-#: tensorcircuit.keras.QuantumLayer.build:7
-msgid "This is typically used to create the weights of `Layer` subclasses."
+#: keras.src.engine.base_layer.Layer.build:8 of
+#: tensorcircuit.applications.van.MaskedConv2D.build:8
+#: tensorcircuit.keras.QuantumLayer.build:8
+msgid ""
+"This is typically used to create the weights of `Layer` subclasses (at "
+"the discretion of the subclass implementer)."
msgstr ""
-#: keras.engine.base_layer.Layer.build:9 of
-#: tensorcircuit.applications.van.MaskedConv2D.build:9
-#: tensorcircuit.keras.QuantumLayer.build:9
+#: keras.src.engine.base_layer.Layer.build:11 of
+#: tensorcircuit.applications.van.MaskedConv2D.build:11
+#: tensorcircuit.keras.QuantumLayer.build:11
msgid ""
"Instance of `TensorShape`, or list of instances of `TensorShape` if the "
"layer expects a list of inputs (one instance per input)."
@@ -7350,81 +7846,79 @@ msgstr ""
#: tensorcircuit.applications.van.ResidualBlock.call:3
#: tensorcircuit.applications.vqes.Linear.call:3
msgid ""
-"Note here that `call()` method in `tf.keras` is little bit different from"
-" `keras` API. In `keras` API, you can pass support masking for layers as "
-"additional arguments. Whereas `tf.keras` has `compute_mask()` method to "
-"support masking."
+"The `call()` method may not create state (except in its first invocation,"
+" wrapping the creation of variables or other resources in "
+"`tf.init_scope()`). It is recommended to create state, including "
+"`tf.Variable` instances and nested `Layer` instances,"
+msgstr ""
+
+#: of tensorcircuit.applications.van.MaskedConv2D.call:7
+#: tensorcircuit.applications.van.MaskedLinear.call:7
+#: tensorcircuit.applications.van.ResidualBlock.call:7
+#: tensorcircuit.applications.vqes.Linear.call:7
+msgid "in `__init__()`, or in the `build()` method that is"
msgstr ""
#: of tensorcircuit.applications.van.MaskedConv2D.call:8
#: tensorcircuit.applications.van.MaskedLinear.call:8
#: tensorcircuit.applications.van.ResidualBlock.call:8
#: tensorcircuit.applications.vqes.Linear.call:8
+msgid "called automatically before `call()` executes for the first time."
+msgstr ""
+
+#: of tensorcircuit.applications.van.MaskedConv2D.call:10
+#: tensorcircuit.applications.van.MaskedLinear.call:10
+#: tensorcircuit.applications.van.ResidualBlock.call:10
+#: tensorcircuit.applications.vqes.Linear.call:10
msgid ""
"Input tensor, or dict/list/tuple of input tensors. The first positional "
"`inputs` argument is subject to special rules: - `inputs` must be "
"explicitly passed. A layer cannot have zero arguments, and `inputs` "
"cannot be provided via the default value of a keyword argument. - NumPy"
-" array or Python scalar values in `inputs` get cast as tensors. - Keras "
-"mask metadata is only collected from `inputs`. - Layers are built "
+" array or Python scalar values in `inputs` get cast as tensors. - Keras"
+" mask metadata is only collected from `inputs`. - Layers are built "
"(`build(input_shape)` method) using shape info from `inputs` only. - "
"`input_spec` compatibility is only checked against `inputs`. - Mixed "
"precision input casting is only applied to `inputs`. If a layer has "
"tensor arguments in `*args` or `**kwargs`, their casting behavior in "
"mixed precision should be handled manually. - The SavedModel input "
-"specification is generated using `inputs` only. - Integration with "
+"specification is generated using `inputs` only. - Integration with "
"various ecosystem packages like TFMOT, TFLite, TF.js, etc is only "
"supported for `inputs` and not for tensors in positional and keyword "
"arguments."
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:8
-#: tensorcircuit.applications.van.MaskedLinear.call:8
-#: tensorcircuit.applications.van.ResidualBlock.call:8
-#: tensorcircuit.applications.vqes.Linear.call:8
+#: of tensorcircuit.applications.van.MaskedConv2D.call:10
+#: tensorcircuit.applications.van.MaskedLinear.call:10
+#: tensorcircuit.applications.van.ResidualBlock.call:10
+#: tensorcircuit.applications.vqes.Linear.call:10
msgid ""
"Input tensor, or dict/list/tuple of input tensors. The first positional "
"`inputs` argument is subject to special rules: - `inputs` must be "
"explicitly passed. A layer cannot have zero"
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:11
-#: tensorcircuit.applications.van.MaskedLinear.call:11
-#: tensorcircuit.applications.van.ResidualBlock.call:11
-#: tensorcircuit.applications.vqes.Linear.call:11
-msgid ""
-"arguments, and `inputs` cannot be provided via the default value of a "
-"keyword argument."
-msgstr ""
-
#: of tensorcircuit.applications.van.MaskedConv2D.call:13
#: tensorcircuit.applications.van.MaskedLinear.call:13
#: tensorcircuit.applications.van.ResidualBlock.call:13
#: tensorcircuit.applications.vqes.Linear.call:13
-msgid "NumPy array or Python scalar values in `inputs` get cast as tensors."
-msgstr ""
-
-#: of tensorcircuit.applications.van.MaskedConv2D.call:14
-#: tensorcircuit.applications.van.MaskedLinear.call:14
-#: tensorcircuit.applications.van.ResidualBlock.call:14
-#: tensorcircuit.applications.vqes.Linear.call:14
-msgid "Keras mask metadata is only collected from `inputs`."
+msgid ""
+"arguments, and `inputs` cannot be provided via the default value of a "
+"keyword argument."
msgstr ""
#: of tensorcircuit.applications.van.MaskedConv2D.call:15
#: tensorcircuit.applications.van.MaskedLinear.call:15
#: tensorcircuit.applications.van.ResidualBlock.call:15
#: tensorcircuit.applications.vqes.Linear.call:15
-msgid ""
-"Layers are built (`build(input_shape)` method) using shape info from "
-"`inputs` only."
+msgid "NumPy array or Python scalar values in `inputs` get cast as tensors."
msgstr ""
#: of tensorcircuit.applications.van.MaskedConv2D.call:17
#: tensorcircuit.applications.van.MaskedLinear.call:17
#: tensorcircuit.applications.van.ResidualBlock.call:17
#: tensorcircuit.applications.vqes.Linear.call:17
-msgid "`input_spec` compatibility is only checked against `inputs`."
+msgid "Keras mask metadata is only collected from `inputs`."
msgstr ""
#: of tensorcircuit.applications.van.MaskedConv2D.call:18
@@ -7432,57 +7926,73 @@ msgstr ""
#: tensorcircuit.applications.van.ResidualBlock.call:18
#: tensorcircuit.applications.vqes.Linear.call:18
msgid ""
-"Mixed precision input casting is only applied to `inputs`. If a layer has"
-" tensor arguments in `*args` or `**kwargs`, their casting behavior in "
-"mixed precision should be handled manually."
+"Layers are built (`build(input_shape)` method) using shape info from "
+"`inputs` only."
+msgstr ""
+
+#: of tensorcircuit.applications.van.MaskedConv2D.call:20
+#: tensorcircuit.applications.van.MaskedLinear.call:20
+#: tensorcircuit.applications.van.ResidualBlock.call:20
+#: tensorcircuit.applications.vqes.Linear.call:20
+msgid "`input_spec` compatibility is only checked against `inputs`."
msgstr ""
#: of tensorcircuit.applications.van.MaskedConv2D.call:21
#: tensorcircuit.applications.van.MaskedLinear.call:21
#: tensorcircuit.applications.van.ResidualBlock.call:21
#: tensorcircuit.applications.vqes.Linear.call:21
+msgid ""
+"Mixed precision input casting is only applied to `inputs`. If a layer has"
+" tensor arguments in `*args` or `**kwargs`, their casting behavior in "
+"mixed precision should be handled manually."
+msgstr ""
+
+#: of tensorcircuit.applications.van.MaskedConv2D.call:24
+#: tensorcircuit.applications.van.MaskedLinear.call:24
+#: tensorcircuit.applications.van.ResidualBlock.call:24
+#: tensorcircuit.applications.vqes.Linear.call:24
msgid "The SavedModel input specification is generated using `inputs` only."
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:22
-#: tensorcircuit.applications.van.MaskedLinear.call:22
-#: tensorcircuit.applications.van.ResidualBlock.call:22
-#: tensorcircuit.applications.vqes.Linear.call:22
+#: of tensorcircuit.applications.van.MaskedConv2D.call:26
+#: tensorcircuit.applications.van.MaskedLinear.call:26
+#: tensorcircuit.applications.van.ResidualBlock.call:26
+#: tensorcircuit.applications.vqes.Linear.call:26
msgid ""
"Integration with various ecosystem packages like TFMOT, TFLite, TF.js, "
"etc is only supported for `inputs` and not for tensors in positional and "
"keyword arguments."
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:25
-#: tensorcircuit.applications.van.MaskedLinear.call:25
-#: tensorcircuit.applications.van.ResidualBlock.call:25
-#: tensorcircuit.applications.vqes.Linear.call:25
+#: of tensorcircuit.applications.van.MaskedConv2D.call:29
+#: tensorcircuit.applications.van.MaskedLinear.call:29
+#: tensorcircuit.applications.van.ResidualBlock.call:29
+#: tensorcircuit.applications.vqes.Linear.call:29
msgid ""
"Additional positional arguments. May contain tensors, although this is "
"not recommended, for the reasons above."
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:27
-#: tensorcircuit.applications.van.MaskedLinear.call:27
-#: tensorcircuit.applications.van.ResidualBlock.call:27
-#: tensorcircuit.applications.vqes.Linear.call:27
+#: of tensorcircuit.applications.van.MaskedConv2D.call:31
+#: tensorcircuit.applications.van.MaskedLinear.call:31
+#: tensorcircuit.applications.van.ResidualBlock.call:31
+#: tensorcircuit.applications.vqes.Linear.call:31
msgid ""
"Additional keyword arguments. May contain tensors, although this is not "
"recommended, for the reasons above. The following optional keyword "
"arguments are reserved: - `training`: Boolean scalar tensor of Python "
"boolean indicating whether the `call` is meant for training or "
"inference. - `mask`: Boolean input mask. If the layer's `call()` method "
-"takes a `mask` argument, its default value will be set to the mask "
-"generated for `inputs` by the previous layer (if `input` did come from "
-"a layer that generated a corresponding mask, i.e. if it came from a "
-"Keras layer with masking support)."
+"takes a `mask` argument, its default value will be set to the mask "
+"generated for `inputs` by the previous layer (if `input` did come from "
+"a layer that generated a corresponding mask, i.e. if it came from a "
+"Keras layer with masking support)."
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:27
-#: tensorcircuit.applications.van.MaskedLinear.call:27
-#: tensorcircuit.applications.van.ResidualBlock.call:27
-#: tensorcircuit.applications.vqes.Linear.call:27
+#: of tensorcircuit.applications.van.MaskedConv2D.call:31
+#: tensorcircuit.applications.van.MaskedLinear.call:31
+#: tensorcircuit.applications.van.ResidualBlock.call:31
+#: tensorcircuit.applications.vqes.Linear.call:31
msgid ""
"Additional keyword arguments. May contain tensors, although this is not "
"recommended, for the reasons above. The following optional keyword "
@@ -7490,17 +8000,17 @@ msgid ""
"boolean indicating"
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:31
-#: tensorcircuit.applications.van.MaskedLinear.call:31
-#: tensorcircuit.applications.van.ResidualBlock.call:31
-#: tensorcircuit.applications.vqes.Linear.call:31
+#: of tensorcircuit.applications.van.MaskedConv2D.call:35
+#: tensorcircuit.applications.van.MaskedLinear.call:35
+#: tensorcircuit.applications.van.ResidualBlock.call:35
+#: tensorcircuit.applications.vqes.Linear.call:35
msgid "whether the `call` is meant for training or inference."
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:32
-#: tensorcircuit.applications.van.MaskedLinear.call:32
-#: tensorcircuit.applications.van.ResidualBlock.call:32
-#: tensorcircuit.applications.vqes.Linear.call:32
+#: of tensorcircuit.applications.van.MaskedConv2D.call:36
+#: tensorcircuit.applications.van.MaskedLinear.call:36
+#: tensorcircuit.applications.van.ResidualBlock.call:36
+#: tensorcircuit.applications.vqes.Linear.call:36
msgid ""
"`mask`: Boolean input mask. If the layer's `call()` method takes a `mask`"
" argument, its default value will be set to the mask generated for "
@@ -7509,18 +8019,40 @@ msgid ""
"masking support)."
msgstr ""
-#: of tensorcircuit.applications.van.MaskedConv2D.call:38
-#: tensorcircuit.applications.van.MaskedLinear.call:38
-#: tensorcircuit.applications.van.ResidualBlock.call:38
-#: tensorcircuit.applications.vqes.Linear.call:38
+#: of tensorcircuit.applications.van.MaskedConv2D.call:42
+#: tensorcircuit.applications.van.MaskedLinear.call:42
+#: tensorcircuit.applications.van.ResidualBlock.call:42
+#: tensorcircuit.applications.vqes.Linear.call:42
msgid "A tensor or list/tuple of tensors."
msgstr ""
-#: keras.engine.base_layer.Layer.get_weights:1 of
+#: keras.src.engine.base_layer.Layer.get_config:1 of
+msgid "Returns the config of the layer."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.get_config:3 of
+msgid ""
+"A layer config is a Python dictionary (serializable) containing the "
+"configuration of a layer. The same layer can be reinstantiated later "
+"(without its trained weights) from this configuration."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.get_config:8 of
+msgid ""
+"The config of a layer does not include connectivity information, nor the "
+"layer class name. These are handled by `Network` (one layer of "
+"abstraction above)."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.get_config:16 of
+msgid "Python dictionary."
+msgstr ""
+
+#: keras.src.engine.base_layer.Layer.get_weights:1 of
msgid "Returns the current weights of the layer, as NumPy arrays."
msgstr ""
-#: keras.engine.base_layer.Layer.get_weights:3 of
+#: keras.src.engine.base_layer.Layer.get_weights:3 of
msgid ""
"The weights of a layer represent the state of the layer. This function "
"returns both trainable and non-trainable weight values associated with "
@@ -7528,7 +8060,7 @@ msgid ""
"state into similarly parameterized layers."
msgstr ""
-#: keras.engine.base_layer.Layer.get_weights:32 of
+#: keras.src.engine.base_layer.Layer.get_weights:32 of
msgid "Weights values as a list of NumPy arrays."
msgstr ""
@@ -7561,20 +8093,20 @@ msgstr ""
#: of tensorcircuit.applications.vqes.JointSchedule:1
msgid ""
"Bases: "
-":py:class:`~keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule`"
+":py:class:`~keras.src.optimizers.schedules.learning_rate_schedule.LearningRateSchedule`"
msgstr ""
-#: keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule.from_config:1
+#: keras.src.optimizers.schedules.learning_rate_schedule.LearningRateSchedule.from_config:1
#: of
msgid "Instantiates a `LearningRateSchedule` from its config."
msgstr ""
-#: keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule.from_config:3
+#: keras.src.optimizers.schedules.learning_rate_schedule.LearningRateSchedule.from_config:3
#: of
msgid "Output of `get_config()`."
msgstr ""
-#: keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule.from_config:5
+#: keras.src.optimizers.schedules.learning_rate_schedule.LearningRateSchedule.from_config:5
#: of
msgid "A `LearningRateSchedule` instance."
msgstr ""
@@ -10823,18 +11355,18 @@ msgid "will be reduced by 1."
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:1
+#: tensorcircuit.backends.tensorflow_backend._svd_tf:1
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:1
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:1
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:1
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:1
msgid "Computes the singular value decomposition (SVD) of a tensor."
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:3
+#: tensorcircuit.backends.tensorflow_backend._svd_tf:3
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:3
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:3
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:3
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:3
msgid ""
"The SVD is performed by treating the tensor as a matrix, with an "
"effective left (row) index resulting from combining the axes "
@@ -10843,10 +11375,10 @@ msgid ""
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:8
+#: tensorcircuit.backends.tensorflow_backend._svd_tf:8
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:8
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:8
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:8
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:8
msgid ""
"For example, if `tensor` had a shape (2, 3, 4, 5) and `pivot_axis` was 2,"
" then `u` would have shape (2, 3, 6), `s` would have shape (6), and `vh` "
@@ -10854,20 +11386,20 @@ msgid ""
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:12
+#: tensorcircuit.backends.tensorflow_backend._svd_tf:12
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:12
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:12
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:12
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:12
msgid ""
"If `max_singular_values` is set to an integer, the SVD is truncated to "
"keep at most this many singular values."
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:15
+#: tensorcircuit.backends.tensorflow_backend._svd_tf:15
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:15
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:15
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:15
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:15
msgid ""
"If `max_truncation_error > 0`, as many singular values will be truncated "
"as possible, so that the truncation error (the norm of discarded singular"
@@ -10880,7 +11412,6 @@ msgstr ""
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:21
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:21
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:21
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:21
msgid ""
"If both `max_singular_values` and `max_truncation_error` are specified, "
"the number of retained singular values will be `min(max_singular_values, "
@@ -10893,7 +11424,6 @@ msgstr ""
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:27
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:27
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:27
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:27
msgid "The output consists of three tensors `u, s, vh` such that: ```python"
msgstr ""
@@ -10901,7 +11431,6 @@ msgstr ""
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:29
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:29
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:29
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:29
msgid "u[i1,...,iN, j] * s[j] * vh[j, k1,...,kM] == tensor[i1,...,iN, k1,...,kM]"
msgstr ""
@@ -10909,7 +11438,6 @@ msgstr ""
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:30
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:30
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:30
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:30
msgid ""
"``` Note that the output ordering matches numpy.linalg.svd rather than "
"tf.svd."
@@ -10923,7 +11451,6 @@ msgstr ""
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:33
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:33
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:33
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:33
msgid "A tensor to be decomposed."
msgstr ""
@@ -10935,7 +11462,6 @@ msgstr ""
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:34
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:34
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:34
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:34
msgid "Where to split the tensor's axes before flattening into a matrix."
msgstr ""
@@ -10943,7 +11469,6 @@ msgstr ""
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:36
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:36
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:36
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:36
msgid "The number of singular values to keep, or `None` to keep them all."
msgstr ""
@@ -10951,7 +11476,6 @@ msgstr ""
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:38
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:38
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:38
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:38
msgid "The maximum allowed truncation error or `None` to not do any truncation."
msgstr ""
@@ -10961,7 +11485,6 @@ msgstr ""
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:40
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:40
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:40
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:40
msgid "Multiply `max_truncation_err` with the largest singular value."
msgstr ""
@@ -10969,7 +11492,6 @@ msgstr ""
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:42
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:42
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:42
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:42
msgid ""
"Left tensor factor. s: Vector of ordered singular values from largest to "
"smallest. vh: Right tensor factor. s_rest: Vector of discarded singular "
@@ -10980,7 +11502,6 @@ msgstr ""
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:42
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:42
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:42
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:42
msgid ""
"Left tensor factor. s: Vector of ordered singular values from largest to "
"smallest. vh: Right tensor factor. s_rest: Vector of discarded singular "
@@ -10988,10 +11509,10 @@ msgid ""
msgstr ""
#: of tensorcircuit.backends.jax_backend._svd_jax:46
+#: tensorcircuit.backends.tensorflow_backend._svd_tf:49
#: tensornetwork.backends.abstract_backend.AbstractBackend.svd:46
#: tensornetwork.backends.numpy.numpy_backend.NumPyBackend.svd:46
#: tensornetwork.backends.pytorch.pytorch_backend.PyTorchBackend.svd:46
-#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.svd:46
msgid "truncation)."
msgstr ""
@@ -12039,6 +12560,62 @@ msgid ""
"backend. The TensorFlow version returns y[i] = x[i] / abs(x[i])."
msgstr ""
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:21
+msgid ""
+"If both `max_singular_values` snd `max_truncation_error` are specified, "
+"the number of retained singular values will be `min(max_singular_values, "
+"nsv_auto_trunc)`, where `nsv_auto_trunc` is the number of singular values"
+" that must be kept to maintain a truncation error smaller than "
+"`max_truncation_error`."
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:27
+msgid ""
+"The output consists of three tensors `u, s, vh` such that: ```python "
+"u[i1,...,iN, j] * s[j] * vh[j, k1,...,kM] == tensor[i1,...,iN, k1,...,kM]"
+" ``` Note that the output ordering matches numpy.linalg.svd rather than "
+"tf.svd."
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:33
+msgid ""
+"Args: tf: The tensorflow module. tensor: A tensor to be decomposed. "
+"pivot_axis: Where to split the tensor's axes before flattening into a"
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:37
+msgid "matrix."
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:38
+msgid "max_singular_values: The number of singular values to keep, or `None` to"
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:39
+msgid "keep them all."
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:40
+msgid ""
+"max_truncation_error: The maximum allowed truncation error or `None` to "
+"not"
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:41
+msgid "do any truncation."
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:42
+msgid "relative: Multiply `max_truncation_err` with the largest singular value."
+msgstr ""
+
+#: of tensorcircuit.backends.tensorflow_backend._svd_tf:44
+msgid ""
+"Returns: u: Left tensor factor. s: Vector of ordered singular values from"
+" largest to smallest. vh: Right tensor factor. s_rest: Vector of "
+"discarded singular values (length zero if no"
+msgstr ""
+
#: of
#: tensornetwork.backends.tensorflow.tensorflow_backend.TensorFlowBackend.trace:11
msgid ""
@@ -12907,15 +13484,15 @@ msgid "``Circuit`` class. Simple usage demo below."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **ANY** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.any_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:4
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:4
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:5
#: tensorcircuit.circuit.Circuit.apply_general_kraus_delayed..apply:4
#: tensorcircuit.densitymatrix.DMCircuit.apply_general_kraus_delayed..apply:4
#: tensorcircuit.densitymatrix.DMCircuit2.apply_general_kraus_delayed..apply:4
@@ -12923,20 +13500,20 @@ msgid "Qubit number that the gate applies on."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:6
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:7
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:6
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:7
msgid "Parameters for the gate."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **CNOT** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.cnot_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
@@ -12945,12 +13522,12 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid "Qubit number that the gate applies on. The matrix for the gate is"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
"1.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j & 1.+0.j\\\\ "
@@ -12958,56 +13535,56 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **CPHASE** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.cphase_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **CR** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.cr_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **CRX** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.crx_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **CRY** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.cry_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **CRZ** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.crz_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **CU** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.cu_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **CY** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.cy_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
@@ -13016,7 +13593,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
"1.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j & 0.+0.j & 0.-1.j\\\\ "
@@ -13024,14 +13601,14 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **CZ** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.cz_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
@@ -13040,7 +13617,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
"1.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j & 0.+0.j\\\\ "
@@ -13048,28 +13625,28 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **EXP** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.exp_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **EXP1** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.exp1_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **FREDKIN** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.fredkin_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & "
@@ -13085,7 +13662,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j &"
" 0.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & "
@@ -13100,14 +13677,14 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **H** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.h_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 0.70710677+0.j & 0.70710677+0.j\\\\ "
@@ -13115,21 +13692,21 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 0.70710677+0.j & 0.70710677+0.j\\\\ 0.70710677+0.j"
" & -0.70710677+0.j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **I** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.i_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j "
@@ -13137,61 +13714,61 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **ISWAP** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.iswap_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply mpo gate in MPO format on the circuit. See "
":py:meth:`tensorcircuit.gates.mpo_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply multicontrol gate in MPO format on the circuit. See "
":py:meth:`tensorcircuit.gates.multicontrol_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **ORX** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.orx_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **ORY** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.ory_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **ORZ** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.orz_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **OX** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.ox_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j\\\\"
@@ -13200,7 +13777,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j\\\\ 1.+0.j & "
"0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j & 0.+0.j\\\\ "
@@ -13208,14 +13785,14 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **OY** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.oy_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 0.+0.j & 0.-1.j & 0.+0.j & 0.+0.j\\\\"
@@ -13224,7 +13801,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 0.+0.j & 0.-1.j & 0.+0.j & 0.+0.j\\\\ 0.+1.j & "
"0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j & 0.+0.j\\\\ "
@@ -13232,14 +13809,14 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **OZ** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.oz_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
@@ -13248,7 +13825,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
"-1.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+0.j & 1.+0.j & 0.+0.j\\\\"
@@ -13256,70 +13833,70 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **PHASE** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.phase_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **R** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.r_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **RX** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.rx_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **RXX** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.rxx_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **RY** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.ry_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **RYY** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.ryy_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **RZ** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.rz_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **RZZ** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.rzz_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **S** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.s_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+1.j "
@@ -13327,19 +13904,19 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & 0.+1.j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **SD** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.sd_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & 0.-1.j "
@@ -13347,19 +13924,19 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & 0.-1.j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **SWAP** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.swap_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\"
@@ -13368,7 +13945,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j\\\\ 0.+0.j & "
"0.+0.j & 1.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j\\\\ "
@@ -13376,14 +13953,14 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **T** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.t_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1. & +0.j & 0. & +0.j\\\\ 0. & +0.j "
@@ -13391,21 +13968,21 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1. & +0.j & 0. & +0.j\\\\ 0. & +0.j & "
"0.70710677+0.70710677j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **TD** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.td_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1. & +0.j & 0. & +0.j\\\\ 0. & +0.j "
@@ -13413,21 +13990,21 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1. & +0.j & 0. & +0.j\\\\ 0. & +0.j & "
"0.70710677-0.70710677j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **TOFFOLI** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.toffoli_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & "
@@ -13443,7 +14020,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & 0.+0.j &"
" 0.+0.j & 0.+0.j\\\\ 0.+0.j & 1.+0.j & 0.+0.j & 0.+0.j & 0.+0.j & "
@@ -13458,21 +14035,21 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_variable_gate_delayed..apply_list:1
msgid ""
"Apply **U** gate with parameters on the circuit. See "
":py:meth:`tensorcircuit.gates.u_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **WROOT** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.wroot_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 0.70710677+0.j & -0.5 & -0.5j\\\\ "
@@ -13480,21 +14057,21 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid ""
"\\begin{bmatrix} 0.70710677+0.j & -0.5 & -0.5j\\\\ 0.5 & -0.5j & "
"0.70710677+0.j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **X** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.x_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 0.+0.j & 1.+0.j\\\\ 1.+0.j & 0.+0.j "
@@ -13502,19 +14079,19 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid "\\begin{bmatrix} 0.+0.j & 1.+0.j\\\\ 1.+0.j & 0.+0.j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **Y** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.y_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 0.+0.j & 0.-1.j\\\\ 0.+1.j & 0.+0.j "
@@ -13522,19 +14099,19 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid "\\begin{bmatrix} 0.+0.j & 0.-1.j\\\\ 0.+1.j & 0.+0.j \\end{bmatrix}"
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:1
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:1
msgid ""
"Apply **Z** gate on the circuit. See "
":py:meth:`tensorcircuit.gates.z_gate`."
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:5
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:5
msgid ""
"Qubit number that the gate applies on. The matrix for the gate is .. "
"math:: \\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & -1.+0.j"
@@ -13542,7 +14119,7 @@ msgid ""
msgstr ""
#: of
-#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply:8
+#: tensorcircuit.abstractcircuit.AbstractCircuit.apply_general_gate_delayed..apply_list:8
msgid "\\begin{bmatrix} 1.+0.j & 0.+0.j\\\\ 0.+0.j & -1.+0.j \\end{bmatrix}"
msgstr ""
@@ -14659,6 +15236,34 @@ msgstr ""
msgid "Experimental features"
msgstr ""
+#: of tensorcircuit.experimental.evol_global:1
+msgid ""
+"ode evolution of time dependent Hamiltonian on circuit of all qubits "
+"[only jax backend support for now]"
+msgstr ""
+
+#: of tensorcircuit.experimental.evol_global:6
+msgid ""
+"h_fun should return a **SPARSE** Hamiltonian matrix with input arguments "
+"time and *args"
+msgstr ""
+
+#: of tensorcircuit.experimental.evol_local:1
+msgid ""
+"ode evolution of time dependent Hamiltonian on circuit of given indices "
+"[only jax backend support for now]"
+msgstr ""
+
+#: of tensorcircuit.experimental.evol_local:8
+msgid ""
+"h_fun should return a dense Hamiltonian matrix with input arguments time "
+"and *args"
+msgstr ""
+
+#: of tensorcircuit.experimental.evol_local:11
+msgid "evolution time"
+msgstr ""
+
#: of tensorcircuit.experimental.hamiltonian_evol:1
msgid ""
"Fast implementation of static full Hamiltonian evolution (default as "
@@ -17478,15 +18083,184 @@ msgid ""
"https://qiskit.org/documentation/stubs/qiskit.visualization.plot_histogram.html"
msgstr ""
-#: ../../source/api/results/readout_mitigation.rst:2
-msgid "tensorcircuit.results.readout_mitigation"
+#: of tensorcircuit.results.counts.plot_histogram:4
+msgid "interesting kw options include: ``number_to_keep`` (int)"
msgstr ""
-#: of tensorcircuit.results.readout_mitigation:1
-msgid "readout error mitigation functionalities"
+#: ../../source/api/results/qem.rst:2
+msgid "tensorcircuit.results.qem"
msgstr ""
-#: of tensorcircuit.results.readout_mitigation.ReadoutMit.__init__:1
+#: ../../source/api/results/qem/benchmark_circuits.rst:2
+msgid "tensorcircuit.results.qem.benchmark_circuits"
+msgstr ""
+
+#: of tensorcircuit.results.qem.benchmark_circuits:1
+msgid "circuits for quantum chip benchmark"
+msgstr ""
+
+#: ../../source/api/results/qem/qem_methods.rst:2
+msgid "tensorcircuit.results.qem.qem_methods"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods:1
+msgid "quantum error mitigation functionalities"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.add_dd:1
+msgid "Add DD sequence to A circuit"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.add_dd:3
+#: tensorcircuit.results.qem.qem_methods.prune_ddcircuit:4
+msgid "circuit"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.add_dd:5
+msgid "The rule to conduct the DD sequence"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.add_dd:7
+#: tensorcircuit.results.qem.qem_methods.prune_ddcircuit:8
+msgid "new circuit"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_dd:1
+msgid "Apply dynamic decoupling (DD) and return the mitigated results."
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_dd:4
+#: tensorcircuit.results.qem.qem_methods.apply_zne:3
+msgid "The aim circuit."
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_dd:6
+#: tensorcircuit.results.qem.qem_methods.apply_rc:5
+msgid "A executor that executes a circuit and return results."
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_dd:8
+msgid ""
+"The rule to construct DD sequence, can use default rule "
+"\"dd_option.rules.xx\""
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_dd:9
+msgid ""
+"or custom rule \"['X','X']\" :type rule: Union[Callable[[int], Any], "
+"List[str]] :param rule_args:An optional dictionary of keyword arguments "
+"for ``rule``, defaults to {}. :type rule_args: Dict[str, Any], optional "
+":param num_trials: The number of independent experiments to average over,"
+" defaults to 1 :type num_trials: int, optional :param full_output: If "
+"``False`` only the mitigated expectation value is"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_dd:16
+msgid ""
+"returned. If ``True`` a dictionary containing all DD data is returned "
+"too, defaults to False"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_dd:19
+msgid "ignore the DD sequences that added to unused qubits, defaults to True"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_dd:21
+msgid "dd sequence full fill the idle circuits, defaults to False"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_dd:23
+#: tensorcircuit.results.qem.qem_methods.apply_rc:11
+msgid "whether the output is bit string, defaults to False"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_dd:25
+msgid ""
+"mitigated expectation value or mitigated expectation value and DD circuit"
+" information"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_rc:1
+msgid "Apply Randomized Compiling or Pauli twirling on two-qubit gates."
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_rc:3
+msgid "Input circuit"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_rc:7
+msgid "Number of circuits for RC, defaults to 1"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_rc:9
+msgid ""
+"Whether simplify the circuits by merging single qubit gates, defaults to "
+"True"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_rc:13
+msgid "Mitigated results by RC"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_zne:1
+msgid "Apply zero-noise extrapolation (ZNE) and return the mitigated results."
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_zne:5
+msgid ""
+"A executor that executes a single circuit or a batch of circuits and "
+"return results."
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_zne:7
+msgid "Determines the extropolation method."
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_zne:9
+msgid "The scaling function for the aim circuit, defaults to fold_gates_at_random"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_zne:11
+msgid ""
+"Number of times expectation values are computed by the executor, average "
+"each point, defaults to 1."
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.apply_zne:14
+msgid "Mitigated average value by ZNE."
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.prune_ddcircuit:1
+msgid ""
+"Discard DD sequence on idle qubits and Discard identity gate (no "
+"identity/idle gate on device now) filled in DD sequence."
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.prune_ddcircuit:6
+msgid "qubit list to apply DD sequence"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.used_qubits:1
+msgid "Create a qubit list that includes all qubits having gate manipulation."
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.used_qubits:3
+msgid "a circuit"
+msgstr ""
+
+#: of tensorcircuit.results.qem.qem_methods.used_qubits:5
+msgid "qubit list"
+msgstr ""
+
+#: ../../source/api/results/readout_mitigation.rst:2
+msgid "tensorcircuit.results.readout_mitigation"
+msgstr ""
+
+#: of tensorcircuit.results.readout_mitigation:1
+msgid "readout error mitigation functionalities"
+msgstr ""
+
+#: of tensorcircuit.results.readout_mitigation.ReadoutMit.__init__:1
msgid "The Class for readout error mitigation"
msgstr ""
@@ -17846,24 +18620,6 @@ msgstr ""
msgid "Useful utilities for quantum chemistry related task"
msgstr ""
-#: of tensorcircuit.templates.chems.get_ps:1
-msgid ""
-"Get Pauli string array and weights array for a qubit Hamiltonian as a sum"
-" of Pauli strings defined in openfermion ``QubitOperator``."
-msgstr ""
-
-#: of tensorcircuit.templates.chems.get_ps:4
-msgid "``openfermion.ops.operators.qubit_operator.QubitOperator``"
-msgstr ""
-
-#: of tensorcircuit.templates.chems.get_ps:6
-msgid "The number of qubits"
-msgstr ""
-
-#: of tensorcircuit.templates.chems.get_ps:8
-msgid "Pauli String array and weights array"
-msgstr ""
-
#: ../../source/api/templates/dataset.rst:2
msgid "tensorcircuit.templates.dataset"
msgstr ""
@@ -24097,3 +24853,1044 @@ msgstr ""
#~ msgid "Returns a dictionary containing a whole state of the module."
#~ msgstr ""
+#~ msgid ""
+#~ "Visualise the circuit. This method "
+#~ "recevies the keywords as same as "
+#~ "qiskit.circuit.QuantumCircuit.draw. More details can"
+#~ " be found here: "
+#~ "https://qiskit.org/documentation/stubs/qiskit.circuit.QuantumCircuit.draw.html."
+#~ msgstr ""
+
+#~ msgid "the corresponding qubit"
+#~ msgstr ""
+
+#~ msgid "Bases: :py:class:`~keras.engine.training.Model`"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "This method can also be called "
+#~ "directly on a Functional Model during"
+#~ " construction. In this case, any loss"
+#~ " Tensors passed to this Model must"
+#~ " be symbolic and be able to be"
+#~ " traced back to the model's `Input`s."
+#~ " These losses become part of the "
+#~ "model's topology and are tracked in "
+#~ "`get_config`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Additional keyword arguments for backward "
+#~ "compatibility. Accepted values: inputs - "
+#~ "Deprecated, will be automatically inferred."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Additional keyword arguments for backward "
+#~ "compatibility. Accepted values:"
+#~ msgstr ""
+
+#~ msgid "inputs - Deprecated, will be automatically inferred."
+#~ msgstr ""
+
+#~ msgid "Deprecated, will be automatically inferred."
+#~ msgstr ""
+
+#~ msgid "Whether to use `ResourceVariable`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "When giving unsupported dtype and no "
+#~ "initializer or when trainable has "
+#~ "been set to True with synchronization"
+#~ " set as `ON_READ`."
+#~ msgstr ""
+
+#~ msgid "This is an alias of `self.__call__`."
+#~ msgstr ""
+
+#~ msgid "Input tensor(s)."
+#~ msgstr ""
+
+#~ msgid "additional positional arguments to be passed to `self.call`."
+#~ msgstr ""
+
+#~ msgid "additional keyword arguments to be passed to `self.call`."
+#~ msgstr ""
+
+#~ msgid "Output tensor(s)."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "1. In case of invalid user-"
+#~ "provided data (not of type tuple,"
+#~ " list, `TensorShape`, or dict). "
+#~ "2. If the model requires call "
+#~ "arguments that are agnostic to "
+#~ "the input shapes (positional or keyword"
+#~ " arg in call signature). 3. If"
+#~ " not all layers were properly built."
+#~ " 4. If float type inputs are "
+#~ "not supported within the layers."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "In case of invalid user-provided "
+#~ "data (not of type tuple, list,"
+#~ " `TensorShape`, or dict). 2. If "
+#~ "the model requires call arguments that"
+#~ " are agnostic to the input "
+#~ "shapes (positional or keyword arg in "
+#~ "call signature). 3. If not all "
+#~ "layers were properly built. 4. If"
+#~ " float type inputs are not supported"
+#~ " within the layers."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "A mask or list of masks. A "
+#~ "mask can be either a boolean "
+#~ "tensor or None (no mask). For more"
+#~ " details, check the guide "
+#~ "[here](https://www.tensorflow.org/guide/keras/masking_and_padding)."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "A mask or list of masks. A "
+#~ "mask can be either a boolean "
+#~ "tensor or None (no mask). For more"
+#~ " details, check the guide"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Loss function. Maybe be a string "
+#~ "(name of loss function), or a "
+#~ "`tf.keras.losses.Loss` instance. See "
+#~ "`tf.keras.losses`. A loss function is "
+#~ "any callable with the signature `loss"
+#~ " = fn(y_true, y_pred)`, where `y_true` "
+#~ "are the ground truth values, and "
+#~ "`y_pred` are the model's predictions. "
+#~ "`y_true` should have shape `(batch_size, "
+#~ "d0, .. dN)` (except in the case"
+#~ " of sparse loss functions such as "
+#~ "sparse categorical crossentropy which expects"
+#~ " integer arrays of shape `(batch_size, "
+#~ "d0, .. dN-1)`). `y_pred` should have "
+#~ "shape `(batch_size, d0, .. dN)`. The "
+#~ "loss function should return a float "
+#~ "tensor. If a custom `Loss` instance "
+#~ "is used and reduction is set to"
+#~ " `None`, return value has shape "
+#~ "`(batch_size, d0, .. dN-1)` i.e. per-"
+#~ "sample or per-timestep loss values; "
+#~ "otherwise, it is a scalar. If the"
+#~ " model has multiple outputs, you can"
+#~ " use a different loss on each "
+#~ "output by passing a dictionary or "
+#~ "a list of losses. The loss value"
+#~ " that will be minimized by the "
+#~ "model will then be the sum of "
+#~ "all individual losses, unless `loss_weights`"
+#~ " is specified."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "List of metrics to be evaluated by"
+#~ " the model during training and "
+#~ "testing. Each of this can be a "
+#~ "string (name of a built-in "
+#~ "function), function or a "
+#~ "`tf.keras.metrics.Metric` instance. See "
+#~ "`tf.keras.metrics`. Typically you will use "
+#~ "`metrics=['accuracy']`. A function is any "
+#~ "callable with the signature `result ="
+#~ " fn(y_true, y_pred)`. To specify different"
+#~ " metrics for different outputs of a"
+#~ " multi-output model, you could also"
+#~ " pass a dictionary, such as "
+#~ "`metrics={'output_a': 'accuracy', 'output_b': "
+#~ "['accuracy', 'mse']}`. You can also pass"
+#~ " a list to specify a metric or"
+#~ " a list of metrics for each "
+#~ "output, such as `metrics=[['accuracy'], "
+#~ "['accuracy', 'mse']]` or `metrics=['accuracy', "
+#~ "['accuracy', 'mse']]`. When you pass the"
+#~ " strings 'accuracy' or 'acc', we "
+#~ "convert this to one of "
+#~ "`tf.keras.metrics.BinaryAccuracy`, "
+#~ "`tf.keras.metrics.CategoricalAccuracy`, "
+#~ "`tf.keras.metrics.SparseCategoricalAccuracy` based on "
+#~ "the loss function used and the "
+#~ "model output shape. We do a "
+#~ "similar conversion for the strings "
+#~ "'crossentropy' and 'ce' as well."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Optional list or dictionary specifying "
+#~ "scalar coefficients (Python floats) to "
+#~ "weight the loss contributions of "
+#~ "different model outputs. The loss value"
+#~ " that will be minimized by the "
+#~ "model will then be the *weighted "
+#~ "sum* of all individual losses, weighted"
+#~ " by the `loss_weights` coefficients. If"
+#~ " a list, it is expected to have"
+#~ " a 1:1 mapping to the model's "
+#~ "outputs. If a dict, it is expected"
+#~ " to map output names (strings) to"
+#~ " scalar coefficients."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Optional list or dictionary specifying "
+#~ "scalar coefficients (Python floats) to "
+#~ "weight the loss contributions of "
+#~ "different model outputs. The loss value"
+#~ " that will be minimized by the "
+#~ "model will then be the *weighted "
+#~ "sum* of all individual losses, weighted"
+#~ " by the `loss_weights` coefficients."
+#~ msgstr ""
+
+#~ msgid "If a list, it is expected to have a 1:1 mapping to the model's"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "outputs. If a dict, it is expected"
+#~ " to map output names (strings) to "
+#~ "scalar coefficients."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Bool. Defaults to `False`. If `True`,"
+#~ " this `Model`'s logic will not be "
+#~ "wrapped in a `tf.function`. Recommended "
+#~ "to leave this as `None` unless "
+#~ "your `Model` cannot be run inside "
+#~ "a `tf.function`. `run_eagerly=True` is not "
+#~ "supported when using "
+#~ "`tf.distribute.experimental.ParameterServerStrategy`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Int. Defaults to 1. The number of"
+#~ " batches to run during each "
+#~ "`tf.function` call. Running multiple batches"
+#~ " inside a single `tf.function` call "
+#~ "can greatly improve performance on TPUs"
+#~ " or small models with a large "
+#~ "Python overhead. At most, one full "
+#~ "epoch will be run each execution. "
+#~ "If a number larger than the size"
+#~ " of the epoch is passed, the "
+#~ "execution will be truncated to the "
+#~ "size of the epoch. Note that if"
+#~ " `steps_per_execution` is set to `N`, "
+#~ "`Callback.on_batch_begin` and `Callback.on_batch_end` "
+#~ "methods will only be called every "
+#~ "`N` batches (i.e. before/after each "
+#~ "`tf.function` execution)."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "If the layer has not been built,"
+#~ " this method will call `build` on "
+#~ "the layer. This assumes that the "
+#~ "layer will later be used with "
+#~ "inputs that match the input shape "
+#~ "provided here."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Shape tuple (tuple of integers) or "
+#~ "list of shape tuples (one per "
+#~ "output tensor of the layer). Shape "
+#~ "tuples can include None for free "
+#~ "dimensions, instead of an integer."
+#~ msgstr ""
+
+#~ msgid "An input shape tuple."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Single TensorSpec or nested structure of"
+#~ " TensorSpec objects, describing how the"
+#~ " layer would transform the provided "
+#~ "input."
+#~ msgstr ""
+
+#~ msgid "Single TensorSpec or nested structure of TensorSpec objects, describing"
+#~ msgstr ""
+
+#~ msgid "how the layer would transform the provided input."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Input data. It could be: - A "
+#~ "Numpy array (or array-like), or a"
+#~ " list of arrays (in case the "
+#~ "model has multiple inputs). - A "
+#~ "TensorFlow tensor, or a list of "
+#~ "tensors (in case the model has "
+#~ "multiple inputs). - A dict mapping "
+#~ "input names to the corresponding "
+#~ "array/tensors, if the model has named"
+#~ " inputs. - A `tf.data` dataset. "
+#~ "Should return a tuple of either "
+#~ "`(inputs, targets)` or `(inputs, targets,"
+#~ " sample_weights)`. - A generator or "
+#~ "`keras.utils.Sequence` returning `(inputs, targets)`"
+#~ " or `(inputs, targets, sample_weights)`. "
+#~ "A more detailed description of unpacking"
+#~ " behavior for iterator types (Dataset, "
+#~ "generator, Sequence) is given in the "
+#~ "`Unpacking behavior for iterator-like "
+#~ "inputs` section of `Model.fit`."
+#~ msgstr ""
+
+#~ msgid "0 or 1. Verbosity mode. 0 = silent, 1 = progress bar."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Optional Numpy array of weights for "
+#~ "the test samples, used for weighting "
+#~ "the loss function. You can either "
+#~ "pass a flat (1D) Numpy array with"
+#~ " the same length as the input "
+#~ "samples (1:1 mapping between weights "
+#~ "and samples), or in the case of"
+#~ " temporal data, you can pass a"
+#~ " 2D array with shape `(samples, "
+#~ "sequence_length)`, to apply a different "
+#~ "weight to every timestep of every"
+#~ " sample. This argument is not "
+#~ "supported when `x` is a dataset, "
+#~ "instead pass sample weights as the "
+#~ "third element of `x`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "List of `keras.callbacks.Callback` instances. "
+#~ "List of callbacks to apply during "
+#~ "evaluation. See "
+#~ "[callbacks](/api_docs/python/tf/keras/callbacks)."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "`Model.evaluate` is not yet supported "
+#~ "with `tf.distribute.experimental.ParameterServerStrategy`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Trains the model for a fixed "
+#~ "number of epochs (iterations on a "
+#~ "dataset)."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Input data. It could be: - A "
+#~ "Numpy array (or array-like), or a"
+#~ " list of arrays (in case the "
+#~ "model has multiple inputs). - A "
+#~ "TensorFlow tensor, or a list of "
+#~ "tensors (in case the model has "
+#~ "multiple inputs). - A dict mapping "
+#~ "input names to the corresponding "
+#~ "array/tensors, if the model has named"
+#~ " inputs. - A `tf.data` dataset. "
+#~ "Should return a tuple of either "
+#~ "`(inputs, targets)` or `(inputs, targets,"
+#~ " sample_weights)`. - A generator or "
+#~ "`keras.utils.Sequence` returning `(inputs, targets)`"
+#~ " or `(inputs, targets, sample_weights)`. "
+#~ "- A `tf.keras.utils.experimental.DatasetCreator`, "
+#~ "which wraps a callable that takes "
+#~ "a single argument of type "
+#~ "`tf.distribute.InputContext`, and returns a "
+#~ "`tf.data.Dataset`. `DatasetCreator` should be "
+#~ "used when users prefer to specify "
+#~ "the per-replica batching and sharding"
+#~ " logic for the `Dataset`. See "
+#~ "`tf.keras.utils.experimental.DatasetCreator` doc for "
+#~ "more information. A more detailed "
+#~ "description of unpacking behavior for "
+#~ "iterator types (Dataset, generator, Sequence)"
+#~ " is given below. If using "
+#~ "`tf.distribute.experimental.ParameterServerStrategy`, only "
+#~ "`DatasetCreator` type is supported for "
+#~ "`x`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "A more detailed description of unpacking"
+#~ " behavior for iterator types (Dataset, "
+#~ "generator, Sequence) is given below. If"
+#~ " using `tf.distribute.experimental.ParameterServerStrategy`,"
+#~ " only `DatasetCreator` type is supported"
+#~ " for `x`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "'auto', 0, 1, or 2. Verbosity "
+#~ "mode. 0 = silent, 1 = progress "
+#~ "bar, 2 = one line per epoch. "
+#~ "'auto' defaults to 1 for most "
+#~ "cases, but 2 when used with "
+#~ "`ParameterServerStrategy`. Note that the "
+#~ "progress bar is not particularly useful"
+#~ " when logged to a file, so "
+#~ "verbose=2 is recommended when not "
+#~ "running interactively (eg, in a "
+#~ "production environment)."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Float between 0 and 1. Fraction "
+#~ "of the training data to be used"
+#~ " as validation data. The model will"
+#~ " set apart this fraction of the "
+#~ "training data, will not train on "
+#~ "it, and will evaluate the loss "
+#~ "and any model metrics on this "
+#~ "data at the end of each epoch."
+#~ " The validation data is selected "
+#~ "from the last samples in the `x`"
+#~ " and `y` data provided, before "
+#~ "shuffling. This argument is not "
+#~ "supported when `x` is a dataset, "
+#~ "generator or `keras.utils.Sequence` instance. "
+#~ "`validation_split` is not yet supported "
+#~ "with `tf.distribute.experimental.ParameterServerStrategy`."
+#~ msgstr ""
+
+#~ msgid "Float between 0 and 1."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Fraction of the training data to "
+#~ "be used as validation data. The "
+#~ "model will set apart this fraction "
+#~ "of the training data, will not "
+#~ "train on it, and will evaluate the"
+#~ " loss and any model metrics on "
+#~ "this data at the end of each "
+#~ "epoch. The validation data is selected"
+#~ " from the last samples in the "
+#~ "`x` and `y` data provided, before "
+#~ "shuffling. This argument is not "
+#~ "supported when `x` is a dataset, "
+#~ "generator or"
+#~ msgstr ""
+
+#~ msgid "`keras.utils.Sequence` instance."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "`validation_split` is not yet supported "
+#~ "with `tf.distribute.experimental.ParameterServerStrategy`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Data on which to evaluate the loss"
+#~ " and any model metrics at the "
+#~ "end of each epoch. The model will"
+#~ " not be trained on this data. "
+#~ "Thus, note the fact that the "
+#~ "validation loss of data provided using"
+#~ " `validation_split` or `validation_data` is "
+#~ "not affected by regularization layers "
+#~ "like noise and dropout. `validation_data` "
+#~ "will override `validation_split`. `validation_data`"
+#~ " could be: - A tuple `(x_val, "
+#~ "y_val)` of Numpy arrays or tensors."
+#~ " - A tuple `(x_val, y_val, "
+#~ "val_sample_weights)` of NumPy arrays. - "
+#~ "A `tf.data.Dataset`. - A Python "
+#~ "generator or `keras.utils.Sequence` returning "
+#~ "`(inputs, targets)` or `(inputs, targets, "
+#~ "sample_weights)`. `validation_data` is not yet"
+#~ " supported with "
+#~ "`tf.distribute.experimental.ParameterServerStrategy`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Optional dictionary mapping class indices "
+#~ "(integers) to a weight (float) value,"
+#~ " used for weighting the loss function"
+#~ " (during training only). This can be"
+#~ " useful to tell the model to "
+#~ "\"pay more attention\" to samples from"
+#~ " an under-represented class."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Optional Numpy array of weights for "
+#~ "the training samples, used for weighting"
+#~ " the loss function (during training "
+#~ "only). You can either pass a flat"
+#~ " (1D) Numpy array with the same "
+#~ "length as the input samples (1:1 "
+#~ "mapping between weights and samples), "
+#~ "or in the case of temporal data,"
+#~ " you can pass a 2D array with"
+#~ " shape `(samples, sequence_length)`, to "
+#~ "apply a different weight to every "
+#~ "timestep of every sample. This argument"
+#~ " is not supported when `x` is a"
+#~ " dataset, generator, or `keras.utils.Sequence`"
+#~ " instance, instead provide the "
+#~ "sample_weights as the third element of"
+#~ " `x`."
+#~ msgstr ""
+
+#~ msgid "Optional Numpy array of weights for"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "the training samples, used for weighting"
+#~ " the loss function (during training "
+#~ "only). You can either pass a flat"
+#~ " (1D) Numpy array with the same "
+#~ "length as the input samples (1:1 "
+#~ "mapping between weights and samples), or"
+#~ " in the case of temporal data, "
+#~ "you can pass a 2D array with "
+#~ "shape `(samples, sequence_length)`, to apply"
+#~ " a different weight to every timestep"
+#~ " of every sample. This argument is"
+#~ " not supported when `x` is a "
+#~ "dataset, generator, or"
+#~ msgstr ""
+
+#~ msgid "`keras.utils.Sequence` instance, instead provide the sample_weights"
+#~ msgstr ""
+
+#~ msgid "as the third element of `x`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Integer or `None`. Total number of "
+#~ "steps (batches of samples) before "
+#~ "declaring one epoch finished and "
+#~ "starting the next epoch. When training"
+#~ " with input tensors such as "
+#~ "TensorFlow data tensors, the default "
+#~ "`None` is equal to the number of"
+#~ " samples in your dataset divided by"
+#~ " the batch size, or 1 if that"
+#~ " cannot be determined. If x is "
+#~ "a `tf.data` dataset, and 'steps_per_epoch' "
+#~ "is None, the epoch will run until"
+#~ " the input dataset is exhausted. When"
+#~ " passing an infinitely repeating dataset,"
+#~ " you must specify the `steps_per_epoch` "
+#~ "argument. If `steps_per_epoch=-1` the training"
+#~ " will run indefinitely with an "
+#~ "infinitely repeating dataset. This argument"
+#~ " is not supported with array inputs."
+#~ " When using "
+#~ "`tf.distribute.experimental.ParameterServerStrategy`: * "
+#~ "`steps_per_epoch=None` is not supported."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Integer or `None`. Total number of "
+#~ "steps (batches of samples) before "
+#~ "declaring one epoch finished and "
+#~ "starting the next epoch. When training"
+#~ " with input tensors such as "
+#~ "TensorFlow data tensors, the default "
+#~ "`None` is equal to the number of"
+#~ " samples in your dataset divided by"
+#~ " the batch size, or 1 if that"
+#~ " cannot be determined. If x is "
+#~ "a `tf.data` dataset, and 'steps_per_epoch' "
+#~ "is None, the epoch will run until"
+#~ " the input dataset is exhausted. When"
+#~ " passing an infinitely repeating dataset,"
+#~ " you must specify the `steps_per_epoch` "
+#~ "argument. If `steps_per_epoch=-1` the training"
+#~ " will run indefinitely with an "
+#~ "infinitely repeating dataset. This argument"
+#~ " is not supported with array inputs."
+#~ " When using "
+#~ "`tf.distribute.experimental.ParameterServerStrategy`:"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Only relevant if validation data is "
+#~ "provided. Integer or `collections.abc.Container` "
+#~ "instance (e.g. list, tuple, etc.). If"
+#~ " an integer, specifies how many "
+#~ "training epochs to run before a "
+#~ "new validation run is performed, e.g."
+#~ " `validation_freq=2` runs validation every "
+#~ "2 epochs. If a Container, specifies "
+#~ "the epochs on which to run "
+#~ "validation, e.g. `validation_freq=[1, 2, 10]`"
+#~ " runs validation at the end of "
+#~ "the 1st, 2nd, and 10th epochs."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "tf.keras.utils.Sequence to the `x` argument"
+#~ " of fit, which will in fact "
+#~ "yield not only features (x) but "
+#~ "optionally targets (y) and sample "
+#~ "weights. Keras requires that the output"
+#~ " of such iterator-likes be "
+#~ "unambiguous. The iterator should return "
+#~ "a tuple of length 1, 2, or "
+#~ "3, where the optional second and "
+#~ "third elements will be used for y"
+#~ " and sample_weight respectively. Any other"
+#~ " type provided will be wrapped in "
+#~ "a length one tuple, effectively treating"
+#~ " everything as 'x'. When yielding "
+#~ "dicts, they should still adhere to "
+#~ "the top-level tuple structure. e.g. "
+#~ "`({\"x0\": x0, \"x1\": x1}, y)`. Keras"
+#~ " will not attempt to separate "
+#~ "features, targets, and weights from the"
+#~ " keys of a single dict."
+#~ msgstr ""
+
+#~ msgid "A notable unsupported data type is the namedtuple. The reason is that"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "it behaves like both an ordered "
+#~ "datatype (tuple) and a mapping datatype"
+#~ " (dict). So given a namedtuple of "
+#~ "the form:"
+#~ msgstr ""
+
+#~ msgid "Retrieves losses relevant to a specific set of inputs."
+#~ msgstr ""
+
+#~ msgid "Input tensor or list/tuple of input tensors."
+#~ msgstr ""
+
+#~ msgid "List of loss tensors of the layer that depend on `inputs`."
+#~ msgstr ""
+
+#~ msgid "Retrieves updates relevant to a specific set of inputs."
+#~ msgstr ""
+
+#~ msgid "List of update ops of the layer that depend on `inputs`."
+#~ msgstr ""
+
+#~ msgid "Deprecated, do NOT use! Only for compatibility with external Keras."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Loads all layer weights, either from "
+#~ "a TensorFlow or an HDF5 weight "
+#~ "file."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "If `by_name` is False weights are "
+#~ "loaded based on the network's topology."
+#~ " This means the architecture should "
+#~ "be the same as when the weights"
+#~ " were saved. Note that layers that"
+#~ " don't have weights are not taken "
+#~ "into account in the topological "
+#~ "ordering, so adding or removing layers"
+#~ " is fine as long as they don't"
+#~ " have weights."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "If `by_name` is True, weights are "
+#~ "loaded into layers only if they "
+#~ "share the same name. This is "
+#~ "useful for fine-tuning or transfer-"
+#~ "learning models where some of the "
+#~ "layers have changed."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Only topological loading (`by_name=False`) is"
+#~ " supported when loading weights from "
+#~ "the TensorFlow format. Note that "
+#~ "topological loading differs slightly between"
+#~ " TensorFlow and HDF5 formats for "
+#~ "user-defined classes inheriting from "
+#~ "`tf.keras.Model`: HDF5 loads based on a"
+#~ " flattened list of weights, while the"
+#~ " TensorFlow format loads based on the"
+#~ " object-local names of attributes to"
+#~ " which layers are assigned in the "
+#~ "`Model`'s constructor."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "String, path to the weights file "
+#~ "to load. For weight files in "
+#~ "TensorFlow format, this is the file "
+#~ "prefix (the same as was passed to"
+#~ " `save_weights`). This can also be a"
+#~ " path to a SavedModel saved from "
+#~ "`model.save`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Boolean, whether to load weights by "
+#~ "name or by topological order. Only "
+#~ "topological loading is supported for "
+#~ "weight files in TensorFlow format."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Boolean, whether to skip loading of "
+#~ "layers where there is a mismatch "
+#~ "in the number of weights, or a "
+#~ "mismatch in the shape of the "
+#~ "weight (only valid when `by_name=True`)."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Optional `tf.train.CheckpointOptions` object that"
+#~ " specifies options for loading weights."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "When loading a weight file in "
+#~ "TensorFlow format, returns the same "
+#~ "status object as `tf.train.Checkpoint.restore`. "
+#~ "When graph building, restore ops are "
+#~ "run automatically as soon as the "
+#~ "network is built (on first call "
+#~ "for user-defined classes inheriting from"
+#~ " `Model`, immediately if it is "
+#~ "already built). When loading weights in"
+#~ " HDF5 format, returns `None`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "When loading a weight file in "
+#~ "TensorFlow format, returns the same "
+#~ "status object as `tf.train.Checkpoint.restore`. "
+#~ "When graph building, restore ops are "
+#~ "run automatically as soon as the "
+#~ "network is built (on first call "
+#~ "for user-defined classes inheriting from"
+#~ " `Model`, immediately if it is "
+#~ "already built)."
+#~ msgstr ""
+
+#~ msgid "When loading weights in HDF5 format, returns `None`."
+#~ msgstr ""
+
+#~ msgid "If `h5py` is not available and the weight file is in HDF5 format."
+#~ msgstr ""
+
+#~ msgid "If `skip_mismatch` is set to `True` when `by_name` is `False`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Returns the model's metrics added using"
+#~ " `compile()`, `add_metric()` APIs."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Computation is done in batches. This "
+#~ "method is designed for performance in"
+#~ " large scale inputs. For small amount"
+#~ " of inputs that fit in one "
+#~ "batch, directly using `__call__()` is "
+#~ "recommended for faster execution, e.g., "
+#~ "`model(x)`, or `model(x, training=False)` if"
+#~ " you have layers such as "
+#~ "`tf.keras.layers.BatchNormalization` that behaves "
+#~ "differently during inference. Also, note "
+#~ "the fact that test loss is not "
+#~ "affected by regularization layers like "
+#~ "noise and dropout."
+#~ msgstr ""
+
+#~ msgid "Verbosity mode, 0 or 1."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "List of `keras.callbacks.Callback` instances. "
+#~ "List of callbacks to apply during "
+#~ "prediction. See "
+#~ "[callbacks](/api_docs/python/tf/keras/callbacks)."
+#~ msgstr ""
+
+#~ msgid "If `model.predict_on_batch` is wrapped in a `tf.function`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "This method should contain the "
+#~ "mathematical logic for one step of "
+#~ "inference. This typically includes the "
+#~ "forward pass."
+#~ msgstr ""
+
+#~ msgid "Saves the model to Tensorflow SavedModel or a single HDF5 file."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Please see `tf.keras.models.save_model` or the"
+#~ " [Serialization and Saving "
+#~ "guide](https://keras.io/guides/serialization_and_saving/) for"
+#~ " details."
+#~ msgstr ""
+
+#~ msgid "String, PathLike, path to SavedModel or H5 file to save the model."
+#~ msgstr ""
+
+#~ msgid "If True, save optimizer's state together."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Either `'tf'` or `'h5'`, indicating "
+#~ "whether to save the model to "
+#~ "Tensorflow SavedModel or HDF5. Defaults "
+#~ "to 'tf' in TF 2.X, and 'h5' "
+#~ "in TF 1.X."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Signatures to save with the SavedModel."
+#~ " Applicable to the 'tf' format only."
+#~ " Please see the `signatures` argument "
+#~ "in `tf.saved_model.save` for details."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "(only applies to SavedModel format) "
+#~ "`tf.saved_model.SaveOptions` object that specifies"
+#~ " options for saving to SavedModel."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "(only applies to SavedModel format) When"
+#~ " enabled, the SavedModel will store "
+#~ "the function traces for each layer. "
+#~ "This can be disabled, so that only"
+#~ " the configs of each layer are "
+#~ "stored. Defaults to `True`. Disabling "
+#~ "this will decrease serialization time "
+#~ "and reduce file size, but it "
+#~ "requires that all custom layers/models "
+#~ "implement a `get_config()` method."
+#~ msgstr ""
+
+#~ msgid "```python from keras.models import load_model"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "model.save('my_model.h5') # creates a HDF5"
+#~ " file 'my_model.h5' del model # "
+#~ "deletes the existing model"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "# returns a compiled model # "
+#~ "identical to the previous one model "
+#~ "= load_model('my_model.h5') ```"
+#~ msgstr ""
+
+#~ msgid "Returns the `tf.TensorSpec` of call inputs as a tuple `(args, kwargs)`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "# arg_specs is `[tf.TensorSpec(...), ...]`."
+#~ " kwarg_specs, in this example, is #"
+#~ " an empty dict since functional "
+#~ "models do not use keyword arguments. "
+#~ "arg_specs, kwarg_specs = model.save_spec()"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "'serving_default': serve.get_concrete_function(*arg_specs, "
+#~ "**kwarg_specs)"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "The TensorFlow format matches objects "
+#~ "and variables by starting at a "
+#~ "root object, `self` for `save_weights`, "
+#~ "and greedily matching attribute names. "
+#~ "For `Model.save` this is the `Model`,"
+#~ " and for `Checkpoint.save` this is "
+#~ "the `Checkpoint` even if the "
+#~ "`Checkpoint` has a model attached. This"
+#~ " means saving a `tf.keras.Model` using "
+#~ "`save_weights` and loading into a "
+#~ "`tf.train.Checkpoint` with a `Model` attached"
+#~ " (or vice versa) will not match "
+#~ "the `Model`'s variables. See the [guide"
+#~ " to training "
+#~ "checkpoints](https://www.tensorflow.org/guide/checkpoint) for"
+#~ " details on the TensorFlow format."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Either 'tf' or 'h5'. A `filepath` "
+#~ "ending in '.h5' or '.keras' will "
+#~ "default to HDF5 if `save_format` is "
+#~ "`None`. Otherwise `None` defaults to "
+#~ "'tf'."
+#~ msgstr ""
+
+#~ msgid "If `h5py` is not available when attempting to save in HDF5 format."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Relative or absolute positions of log"
+#~ " elements in each line. If not "
+#~ "provided, defaults to `[.33, .55, .67,"
+#~ " 1.]`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Print function to use. Defaults to "
+#~ "`print`. It will be called on each"
+#~ " line of the summary. You can "
+#~ "set it to a custom function in "
+#~ "order to capture the string summary."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Whether to expand the nested models. "
+#~ "If not provided, defaults to `False`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Input data. It could be: - A "
+#~ "Numpy array (or array-like), or a"
+#~ " list of arrays (in case the "
+#~ "model has multiple inputs). - A "
+#~ "TensorFlow tensor, or a list of "
+#~ "tensors (in case the model has "
+#~ "multiple inputs). - A dict mapping "
+#~ "input names to the corresponding "
+#~ "array/tensors, if the model has "
+#~ "named inputs."
+#~ msgstr ""
+
+#~ msgid "A dict mapping input names to the corresponding array/tensors, if"
+#~ msgstr ""
+
+#~ msgid "the model has named inputs."
+#~ msgstr ""
+
+#~ msgid "If `model.test_on_batch` is wrapped in a `tf.function`."
+#~ msgstr ""
+
+#~ msgid "Additional keyword arguments to be passed to `json.dumps()`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Optional dictionary mapping class indices "
+#~ "(integers) to a weight (float) to "
+#~ "apply to the model's loss for the"
+#~ " samples from this class during "
+#~ "training. This can be useful to "
+#~ "tell the model to \"pay more "
+#~ "attention\" to samples from an under-"
+#~ "represented class."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "This method can be overridden to "
+#~ "support custom training logic. For "
+#~ "concrete examples of how to override "
+#~ "this method see [Customizing what "
+#~ "happens in "
+#~ "fit](https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit)."
+#~ " This method is called by "
+#~ "`Model.make_train_function`."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "This method should contain the "
+#~ "mathematical logic for one step of "
+#~ "training. This typically includes the "
+#~ "forward pass, loss calculation, "
+#~ "backpropagation, and metric updates."
+#~ msgstr ""
+
+#~ msgid "Bases: :py:class:`~keras.engine.base_layer.Layer`"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Input tensor, or dict/list/tuple of "
+#~ "input tensors. The first positional "
+#~ "`inputs` argument is subject to special"
+#~ " rules: - `inputs` must be explicitly"
+#~ " passed. A layer cannot have zero"
+#~ " arguments, and `inputs` cannot be "
+#~ "provided via the default value of "
+#~ "a keyword argument. - NumPy array "
+#~ "or Python scalar values in `inputs` "
+#~ "get cast as tensors. - Keras mask"
+#~ " metadata is only collected from "
+#~ "`inputs`. - Layers are built "
+#~ "(`build(input_shape)` method) using shape "
+#~ "info from `inputs` only. - `input_spec`"
+#~ " compatibility is only checked against "
+#~ "`inputs`. - Mixed precision input "
+#~ "casting is only applied to `inputs`."
+#~ " If a layer has tensor arguments"
+#~ " in `*args` or `**kwargs`, their "
+#~ "casting behavior in mixed precision "
+#~ "should be handled manually. - The "
+#~ "SavedModel input specification is generated"
+#~ " using `inputs` only. - Integration "
+#~ "with various ecosystem packages like "
+#~ "TFMOT, TFLite, TF.js, etc is only "
+#~ "supported for `inputs` and not for "
+#~ "tensors in positional and keyword "
+#~ "arguments."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Additional keyword arguments. May contain "
+#~ "tensors, although this is not "
+#~ "recommended, for the reasons above. The"
+#~ " following optional keyword arguments are"
+#~ " reserved: - `training`: Boolean scalar "
+#~ "tensor of Python boolean indicating "
+#~ "whether the `call` is meant for "
+#~ "training or inference. - `mask`: Boolean"
+#~ " input mask. If the layer's `call()`"
+#~ " method takes a `mask` argument, "
+#~ "its default value will be set to"
+#~ " the mask generated for `inputs` by"
+#~ " the previous layer (if `input` did"
+#~ " come from a layer that generated"
+#~ " a corresponding mask, i.e. if it "
+#~ "came from a Keras layer with "
+#~ "masking support)."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Bases: "
+#~ ":py:class:`~keras.optimizer_v2.learning_rate_schedule.LearningRateSchedule`"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "Get Pauli string array and weights "
+#~ "array for a qubit Hamiltonian as a"
+#~ " sum of Pauli strings defined in "
+#~ "openfermion ``QubitOperator``."
+#~ msgstr ""
+
+#~ msgid "``openfermion.ops.operators.qubit_operator.QubitOperator``"
+#~ msgstr ""
+
+#~ msgid "The number of qubits"
+#~ msgstr ""
+
+#~ msgid "Pauli String array and weights array"
+#~ msgstr ""
+
diff --git a/docs/source/locale/zh/LC_MESSAGES/contribs.po b/docs/source/locale/zh/LC_MESSAGES/contribs.po
index cb11a0fd..004ca724 100644
--- a/docs/source/locale/zh/LC_MESSAGES/contribs.po
+++ b/docs/source/locale/zh/LC_MESSAGES/contribs.po
@@ -9,142 +9,222 @@ msgid ""
msgstr ""
"Project-Id-Version: tensorcircuit \n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-02-02 14:19+0800\n"
+"POT-Creation-Date: 2023-07-14 15:43+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language-Team: LANGUAGE \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.9.1\n"
+"Generated-By: Babel 2.12.1\n"
+#: ../../source/contribs/development_Mac.md:1
#: ../../source/contribs/development_MacARM.md:1
+#: ../../source/contribs/development_MacM2.md:1
msgid "Tensorcircuit Installation Guide on MacOS"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:3
-msgid "Contributed by Mark (Zixuan) Song"
+#: ../../source/contribs/development_Mac.md:3
+msgid "Contributed by [_Mark (Zixuan) Song_](https://marksong.tech)"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:5
+#: ../../source/contribs/development_Mac.md:5
+msgid ""
+"Apple has updated Tensorflow (for MacOS) so that installation on M-series"
+" (until M2) and Intel-series Mac can follow the exact same procedure."
+msgstr ""
+
+#: ../../source/contribs/development_Mac.md:7
+#: ../../source/contribs/development_MacARM.md:8
+#: ../../source/contribs/development_MacM2.md:10
msgid "Starting From Scratch"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:7
-msgid "For completely new macos or macos without xcode and brew"
+#: ../../source/contribs/development_Mac.md:9
+msgid "For completely new Macos or Macos without Xcode and Homebrew installed."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:9
+#: ../../source/contribs/development_Mac.md:11
+#: ../../source/contribs/development_MacARM.md:12
+#: ../../source/contribs/development_MacM2.md:12
msgid "Install Xcode Command Line Tools"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:11
+#: ../../source/contribs/development_Mac.md:13
+#: ../../source/contribs/development_MacARM.md:14
+#: ../../source/contribs/development_MacM2.md:14
msgid "Need graphical access to the machine."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:13
+#: ../../source/contribs/development_Mac.md:15
+#: ../../source/contribs/development_MacARM.md:16
+#: ../../source/contribs/development_MacM2.md:16
msgid "Run `xcode-select --install` to install if on optimal internet."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:15
+#: ../../source/contribs/development_Mac.md:17
msgid ""
-"Or Download from [Apple](https://developer.apple.com/download/more/) "
-"Command Line Tools installation image then install if internet connection"
-" is weak."
+"Or Download it from [Apple](https://developer.apple.com/download/more/) "
+"Command Line Tools installation image then install it if the internet "
+"connection is weak."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:17
+#: ../../source/contribs/development_Mac.md:19
+#: ../../source/contribs/development_MacARM.md:20
+#: ../../source/contribs/development_MacM2.md:20
msgid "Install Miniconda"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:19
+#: ../../source/contribs/development_Mac.md:21
msgid ""
-"Due to the limitation of MacOS and packages, the lastest version of "
-"python does not always function as desired, thus miniconda installation "
-"is advised to solve the issues."
+"Due to the limitation of MacOS and packages, the latest version of Python"
+" does not always function as desired, thus miniconda installation is "
+"advised to solve the issues."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:28
-msgid "Install TC Prerequisites"
-msgstr ""
-
-#: ../../source/contribs/development_MacARM.md:34
+#: ../../source/contribs/development_Mac.md:30
+#: ../../source/contribs/development_MacARM.md:37
msgid "Install TC Backends"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:36
-msgid "There are four backends to choose from, Numpy, Tensorflow, Jax, Torch."
+#: ../../source/contribs/development_Mac.md:32
+msgid "There are four backends to choose from, Numpy, Tensorflow, Jax, and Torch."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:38
+#: ../../source/contribs/development_Mac.md:34
+#: ../../source/contribs/development_MacARM.md:41
msgid "Install Jax, Pytorch, Qiskit, Cirq (Optional)"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:44
+#: ../../source/contribs/development_Mac.md:40
+#: ../../source/contribs/development_MacARM.md:47
msgid "Install Tensorflow (Optional)"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:46
-msgid "Install Tensorflow (Recommended Approach)"
+#: ../../source/contribs/development_Mac.md:42
+msgid "Installation"
+msgstr ""
+
+#: ../../source/contribs/development_Mac.md:44
+msgid "For Tensorflow version 2.13 or later:"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:48
+#: ../../source/contribs/development_Mac.md:50
+msgid "For Tensorflow version 2.12 or earlier:"
+msgstr ""
+
+#: ../../source/contribs/development_Mac.md:56
+#: ../../source/contribs/development_MacARM.md:57
+#: ../../source/contribs/development_MacARM.md:89
+msgid "Verify Tensorflow Installation"
+msgstr ""
+
+#: ../../source/contribs/development_Mac.md:74
+#: ../../source/contribs/development_MacARM.md:107
+msgid "Install Tensorcircuit"
+msgstr ""
+
+#: ../../source/contribs/development_Mac.md:80
msgid ""
-"❗️ Tensorflow with MacOS optimization would not function correctly in "
-"version 2.11.0 and before. Do not use this version of tensorflow if you "
-"intented to train any machine learning model."
+"Until July 2023, this has been tested on Intel Macs running Ventura, M1 "
+"Macs running Ventura, M2 Macs running Ventura, and M2 Macs running Sonoma"
+" beta."
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:3
+msgid "Contributed by Mark (Zixuan) Song"
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:5
+#: ../../source/contribs/development_MacM2.md:5
+msgid ""
+".. warning:: This page is deprecated. Please visit `the update "
+"tutorial `_ for the latest information."
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:10
+msgid "For completely new macos or macos without xcode and brew"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:50
+#: ../../source/contribs/development_MacARM.md:18
+#: ../../source/contribs/development_MacM2.md:18
msgid ""
-"FYI: Error can occur when machine learning training or gpu related code "
-"is involved."
+"Or Download from [Apple](https://developer.apple.com/download/more/) "
+"Command Line Tools installation image then install if internet connection"
+" is weak."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:52
+#: ../../source/contribs/development_MacARM.md:22
msgid ""
-"⚠️ Tensorflow without macos optimization does not support Metal API and "
-"utilizing GPU (both intel chips and M-series chips) until at least "
-"tensorflow 2.11. Tensorflow-macos would fail when running "
-"`tc.backend.to_dense()`"
+"Due to the limitation of MacOS and packages, the lastest version of "
+"python does not always function as desired, thus miniconda installation "
+"is advised to solve the issues."
msgstr ""
-#: ../../source/contribs/development_MacARM.md:60
-msgid "Verify Tensorflow Installation"
+#: ../../source/contribs/development_MacARM.md:31
+msgid "Install TC Prerequisites"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:78
-msgid "Install Tensorcircuit"
+#: ../../source/contribs/development_MacARM.md:39
+msgid "There are four backends to choose from, Numpy, Tensorflow, Jax, Torch."
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:49
+msgid "Install Tensorflow without MacOS optimization"
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:75
+msgid "Install Tensorflow with MacOS optimization (Recommended)"
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:77
+msgid "For tensorflow version 2.13 or later:"
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:83
+msgid "For tensorflow version 2.12 or earlier:"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:84
-msgid "Testing Platform (Tested Feb 2023)"
+#: ../../source/contribs/development_MacARM.md:113
+msgid "Testing Platform (Tested Jun 2023)"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:86
+#: ../../source/contribs/development_MacARM.md:115
msgid "Platform 1:"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:87
+#: ../../source/contribs/development_MacARM.md:116
msgid "MacOS Ventura 13.1 (Build version 22C65)"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:88
+#: ../../source/contribs/development_MacARM.md:117
msgid "M1 Ultra"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:89
+#: ../../source/contribs/development_MacARM.md:118
msgid "Platform 2:"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:90
+#: ../../source/contribs/development_MacARM.md:119
msgid "MacOS Ventura 13.2 (Build version 22D49)"
msgstr ""
-#: ../../source/contribs/development_MacARM.md:91
+#: ../../source/contribs/development_MacARM.md:120
msgid "M1 Ultra (Virtual)"
msgstr ""
+#: ../../source/contribs/development_MacARM.md:121
+msgid "Platform 4:"
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:122
+msgid "MacOS Sonoma 14.0 Beta 2 (Build version 23A5276g)"
+msgstr ""
+
+#: ../../source/contribs/development_MacARM.md:123
+msgid "M2 Max"
+msgstr ""
+
#: ../../source/contribs/development_MacM1.rst:2
msgid "Run TensorCircuit on TensorlowBackend with Apple M1"
msgstr ""
@@ -156,7 +236,7 @@ msgstr ""
#: ../../source/contribs/development_MacM1.rst:7
msgid ""
"This page is deprecated. Please visit `the update tutorial "
-"`_ for latest information."
+"`_ for the latest information."
msgstr ""
#: ../../source/contribs/development_MacM1.rst:11
@@ -256,6 +336,146 @@ msgstr ""
msgid "Then unpackage it, and cd into the folder with \"setup.py\". Conducting"
msgstr ""
+#: ../../source/contribs/development_MacM2.md:3
+msgid "Contributed by [Hong-Ye Hu](https://github.com/hongyehu)"
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:8
+msgid ""
+"The key issue addressed in this document is **how to install both "
+"TensorFlow and Jax on a M2 chip MacOS without conflict**."
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:22
+msgid ""
+"Due to the limitation of MacOS and packages, the lastest version of "
+"python does not always function as desired, thus miniconda installation "
+"is advised to solve the issues. And use anaconda virtual environment is "
+"always a good habit."
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:30
+msgid "Install Packages"
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:31
+msgid ""
+"First, create a virtual environment, and make sure the python version is "
+"3.8.5 by"
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:36
+msgid ""
+"Then, install the TensorFlow from `.whl` file (file can be downloaded "
+"from this "
+"[URL](https://drive.google.com/drive/folders/1oSipZLnoeQB0Awz8U68KYeCPsULy_dQ7))."
+" This will install TensorFlow version 2.4.1"
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:40
+msgid "Next, one need to install **Jax** and **Optax** by"
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:45
+msgid ""
+"Now, hopefully, you should be able to use both Jax and TensorFlow in this"
+" environment. But sometimes, it may give you an error \"ERROR: package "
+"Chardet not found.\". If that is the case, you can install it by `conda "
+"install chardet`. Lastly, install tensorcircuit"
+msgstr ""
+
+#: ../../source/contribs/development_MacM2.md:51
+msgid ""
+"This is the solution that seems to work for M2-chip MacOS. Please let me "
+"know if there is a better solution!"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:1
+msgid "MacOS Tensorcircuit 安装教程"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:3
+msgid "[_Mark (Zixuan) Song_](https://marksong.tech) 撰写"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:5
+msgid "由于苹果更新了Tensorflow,因此M系列(直到M2)和英特尔系列Mac上的安装可以遵循完全相同的过程。"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:7
+msgid "从头开始"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:9
+msgid "对于全新的Macos或未安装Xcode和Homebrew的Macos。"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:11
+msgid "安装Xcode命令行工具"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:13
+msgid "需要对机器的图形访问。"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:15
+msgid "如果网络良好,请运行`xcode-select --install`进行安装。"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:17
+msgid "或者,如果网络连接较弱,请从[苹果](https://developer.apple.com/download/more/)下载命令行工具安装映像,然后进行安装。"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:19
+msgid "安装Miniconda"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:21
+msgid "由于MacOS和软件包的限制,因此建议安装miniconda以解决问题。"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:30
+msgid "安装TC后端"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:32
+msgid "有四个后端可供选择,Numpy,Tensorflow,Jax和Torch。"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:34
+msgid "安装Jax,Pytorch,Qiskit,Cirq(可选)"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:40
+msgid "安装Tensorflow(可选)"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:42
+msgid "安装步骤"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:44
+msgid "Tensorflow版本2.13或之后:"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:50
+msgid "Tensorflow版本2.12或之前:"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:56
+msgid "验证Tensorflow安装"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:74
+msgid "安装Tensorcircuit"
+msgstr ""
+
+#: ../../source/contribs/development_Mac_cn.md:80
+msgid ""
+"直到2023年7月,这已在运行Ventura的英特尔i9 Mac、运行Ventura的M1 Mac、运行Ventura的M2 "
+"Mac、运行Sonoma测试版的M2 Mac上进行了测试。"
+msgstr ""
+
#: ../../source/contribs/development_windows.rst:2
msgid "Run TensorCircuit on Windows Machine with Docker"
msgstr ""
@@ -666,3 +886,39 @@ msgstr ""
#~ msgid "Testing Platform"
#~ msgstr ""
+#~ msgid "Install Tensorflow (Recommended Approach)"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "❗️ Tensorflow with MacOS optimization "
+#~ "would not function correctly in version"
+#~ " 2.11.0 and before. Do not use "
+#~ "this version of tensorflow if you "
+#~ "intented to train any machine learning"
+#~ " model."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "FYI: Error can occur when machine "
+#~ "learning training or gpu related code"
+#~ " is involved."
+#~ msgstr ""
+
+#~ msgid ""
+#~ "⚠️ Tensorflow without macos optimization "
+#~ "does not support Metal API and "
+#~ "utilizing GPU (both intel chips and "
+#~ "M-series chips) until at least "
+#~ "tensorflow 2.11. Tensorflow-macos would "
+#~ "fail when running `tc.backend.to_dense()`"
+#~ msgstr ""
+
+#~ msgid "Testing Platform (Tested Feb 2023)"
+#~ msgstr ""
+
+#~ msgid ""
+#~ "This page is deprecated. Please visit"
+#~ " `the update tutorial `_"
+#~ " for latest information."
+#~ msgstr ""
+
diff --git a/docs/source/locale/zh/LC_MESSAGES/index.po b/docs/source/locale/zh/LC_MESSAGES/index.po
index 08959a7d..20ed436d 100644
--- a/docs/source/locale/zh/LC_MESSAGES/index.po
+++ b/docs/source/locale/zh/LC_MESSAGES/index.po
@@ -8,8 +8,8 @@ msgid ""
msgstr ""
"Project-Id-Version: tensorcircuit\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-05-10 22:52+0800\n"
-"PO-Revision-Date: 2023-05-10 22:54+0800\n"
+"POT-Creation-Date: 2023-05-28 14:36+0800\n"
+"PO-Revision-Date: 2023-05-28 14:39+0800\n"
"Last-Translator: Xinghan Yang\n"
"Language-Team: \n"
"Language: cn\n"
@@ -27,44 +27,48 @@ msgstr "参考文档"
msgid "**Welcome and congratulations! You have found TensorCircuit.** 👏"
msgstr "**祝贺你发现了 TensorCircuit!** 👏"
-#: ../../source/index.rst:10
+#: ../../source/index.rst:11
+msgid "Introduction"
+msgstr "介绍"
+
+#: ../../source/index.rst:13
msgid ""
"TensorCircuit is an open-source high-performance quantum computing software "
"framework in Python."
msgstr "TensorCircuit 是基于 Python 的开源高性能量子计算软件框架。"
-#: ../../source/index.rst:12
+#: ../../source/index.rst:15
msgid "It is built for humans. 👽"
msgstr "适合人类。👽"
-#: ../../source/index.rst:14
+#: ../../source/index.rst:17
msgid "It is designed for speed, flexibility and elegance. 🚀"
msgstr "速度,灵活,优雅。🚀"
-#: ../../source/index.rst:16
+#: ../../source/index.rst:19
msgid "It is empowered by advanced tensor network simulator engine. 🔋"
msgstr "先进张量网络引擎赋能。🔋"
-#: ../../source/index.rst:18
+#: ../../source/index.rst:21
msgid ""
"It is ready for quantum hardware access with CPU/GPU/QPU (local/cloud) hybrid "
"solutions. 🖥"
msgstr "量子硬件支持,优雅 CPU/GPU/QPU 混合部署方案。 🖥"
-#: ../../source/index.rst:20
+#: ../../source/index.rst:23
msgid ""
"It is implemented with industry-standard machine learning frameworks: "
"TensorFlow, JAX, and PyTorch. 🤖"
msgstr "业界标准机器学习框架 TensorFlow,JAX,PyTorch 实现。🤖"
-#: ../../source/index.rst:22
+#: ../../source/index.rst:25
msgid ""
"It is compatible with machine learning engineering paradigms: automatic "
"differentiation, just-in-time compilation, vectorized parallelism and GPU "
"acceleration. 🛠"
msgstr "与机器学习工程实践兼容:自动微分,即时编译,向量并行化和 GPU 加速。🛠"
-#: ../../source/index.rst:24
+#: ../../source/index.rst:27
msgid ""
"With the help of TensorCircuit, now get ready to efficiently and elegantly "
"solve interesting and challenging quantum computing problems: from academic "
@@ -73,11 +77,11 @@ msgstr ""
"有了 TensorCircuit,你现在可以高效优雅地解决量子计算中的各种问题:从学术研究的原"
"型开发到工业应用的部署。"
-#: ../../source/index.rst:29
+#: ../../source/index.rst:33
msgid "Relevant Links"
msgstr "相关链接"
-#: ../../source/index.rst:31
+#: ../../source/index.rst:35
msgid ""
"TensorCircuit is created and maintained by `Shi-Xin Zhang `_ and this version is released by `Tencent Quantum Lab `_ 创建和维"
"护;此版本由 `腾讯量子实验室 `_ 发布。"
-#: ../../source/index.rst:33
+#: ../../source/index.rst:37
msgid ""
"The current core authors of TensorCircuit are `Shi-Xin Zhang `_ and `Yu-Qin Chen `_. We also "
@@ -98,7 +102,7 @@ msgstr ""
"区的 `贡献 `_。"
-#: ../../source/index.rst:36
+#: ../../source/index.rst:40
msgid ""
"If you have any further questions or collaboration ideas, please use the issue "
"tracker or forum below, or send email to shixinzhang#tencent.com."
@@ -106,93 +110,189 @@ msgstr ""
"如果关于 TensorCircuit 有任何问题咨询或合作意向,请在 issue 或 discussion 提问,"
"或发送邮件到 shixinzhang#tencent.com。"
-#: ../../source/index.rst:39
-msgid "Source code: https://github.com/tencent-quantum-lab/tensorcircuit"
-msgstr "源代码: https://github.com/tencent-quantum-lab/tensorcircuit"
+#: ../../source/index.rst:45
+msgid "Source code"
+msgstr "源代码"
-#: ../../source/index.rst:41
-msgid "Documentation: https://tensorcircuit.readthedocs.io"
-msgstr "文档: https://tensorcircuit.readthedocs.io"
+#: ../../source/index.rst:49
+msgid "GitHub"
+msgstr ""
-#: ../../source/index.rst:43
-msgid ""
-"Software Whitepaper (published in Quantum): https://quantum-journal.org/papers/"
-"q-2023-02-02-912/"
+#: ../../source/index.rst:52
+msgid "Documentation"
+msgstr "参考文档"
+
+#: ../../source/index.rst:56
+msgid "Readthedocs"
msgstr ""
-"软件白皮书 (发表于 Quantum): https://quantum-journal.org/papers/"
-"q-2023-02-02-912/"
-#: ../../source/index.rst:45
-msgid "Issue Tracker: https://github.com/tencent-quantum-lab/tensorcircuit/issues"
-msgstr "问题跟踪: https://github.com/tencent-quantum-lab/tensorcircuit/issues"
+#: ../../source/index.rst:59
+msgid "Whitepaper"
+msgstr "白皮书"
+
+#: ../../source/index.rst:63
+msgid "*Quantum* journal"
+msgstr "Quantum 期刊"
+
+#: ../../source/index.rst:66
+msgid "Issue Tracker"
+msgstr "问题跟踪"
-#: ../../source/index.rst:47
-msgid "Forum: https://github.com/tencent-quantum-lab/tensorcircuit/discussions"
+#: ../../source/index.rst:70
+msgid "GitHub Issues"
msgstr ""
-"论坛社区: https://github.com/tencent-quantum-lab/tensorcircuit/discussions"
-#: ../../source/index.rst:49
-msgid "PyPI page: https://pypi.org/project/tensorcircuit"
-msgstr "PyPI 页面: https://pypi.org/project/tensorcircuit"
+#: ../../source/index.rst:73
+msgid "Forum"
+msgstr "论坛"
-#: ../../source/index.rst:51
-msgid ""
-"DockerHub page: https://hub.docker.com/repository/docker/tensorcircuit/"
-"tensorcircuit"
+#: ../../source/index.rst:77
+msgid "GitHub Discussions"
+msgstr ""
+
+#: ../../source/index.rst:80
+msgid "PyPI"
msgstr ""
-"DockerHub 页面: https://hub.docker.com/repository/docker/tensorcircuit/"
-"tensorcircuit"
-#: ../../source/index.rst:53
+#: ../../source/index.rst:84
+msgid "``pip install``"
+msgstr ""
+
+#: ../../source/index.rst:87
+msgid "DockerHub"
+msgstr ""
+
+#: ../../source/index.rst:91
+msgid "``docker pull``"
+msgstr ""
+
+#: ../../source/index.rst:94
+msgid "Application"
+msgstr "应用"
+
+#: ../../source/index.rst:98
+msgid "Research using TC"
+msgstr "研究项目"
+
+#: ../../source/index.rst:101
+msgid "Cloud"
+msgstr "量子云"
+
+#: ../../source/index.rst:104
+msgid "Tencent Quantum Cloud"
+msgstr "腾讯量子云平台"
+
+#: ../../source/index.rst:131
+msgid "Unified Quantum Programming"
+msgstr "统一量子编程"
+
+#: ../../source/index.rst:133
msgid ""
-"Research and projects based on TensorCircuit: https://github.com/tencent-"
-"quantum-lab/tensorcircuit#research-and-applications"
+"TensorCircuit is unifying infrastructures and interfaces for quantum computing."
+msgstr "TensorCircuit 尝试统一量子计算的基础设施和编程界面。"
+
+#: ../../source/index.rst:140
+msgid "Unified Backends"
+msgstr "统一后端"
+
+#: ../../source/index.rst:144
+msgid "Jax/TensorFlow/PyTorch/Numpy/Cupy"
+msgstr ""
+
+#: ../../source/index.rst:146
+msgid "Unified Devices"
+msgstr "统一设备"
+
+#: ../../source/index.rst:150
+msgid "CPU/GPU/TPU"
msgstr ""
-"基于 TensorCircuit 的研究和项目: https://github.com/tencent-quantum-lab/"
-"tensorcircuit#research-and-applications"
-#: ../../source/index.rst:55
-msgid "Tencent Quantum Cloud Service: https://quantum.tencent.com/cloud/"
-msgstr "腾讯量子云服务: https://quantum.tencent.com/cloud/"
+#: ../../source/index.rst:152
+msgid "Unified Providers"
+msgstr "统一平台"
+
+#: ../../source/index.rst:156
+msgid "QPUs from different vendors"
+msgstr "不同供应商的 QPU"
+
+#: ../../source/index.rst:158
+msgid "Unified Resources"
+msgstr "统一资源"
+
+#: ../../source/index.rst:162
+msgid "local/cloud/HPC"
+msgstr "本地/云/集群"
+
+#: ../../source/index.rst:170
+msgid "Unified Interfaces"
+msgstr "统一接口"
+
+#: ../../source/index.rst:174
+msgid "numerical sim/hardware exp"
+msgstr "数值模拟/硬件实验"
+
+#: ../../source/index.rst:176
+msgid "Unified Engines"
+msgstr "统一引擎"
+
+#: ../../source/index.rst:180
+msgid "ideal/noisy/approximate simulation"
+msgstr "理想/含噪/近似模拟"
+
+#: ../../source/index.rst:182
+msgid "Unified Representations"
+msgstr "统一表示"
-#: ../../source/index.rst:61
+#: ../../source/index.rst:186
+msgid "from/to_IR/qiskit/openqasm/json"
+msgstr ""
+
+#: ../../source/index.rst:188
+msgid "Unified Pipelines"
+msgstr "统一流程"
+
+#: ../../source/index.rst:192
+msgid "stateless functional programming/stateful ML models"
+msgstr "函数式编程/面向对象模型"
+
+#: ../../source/index.rst:198
msgid "Reference Documentation"
msgstr "参考文档"
-#: ../../source/index.rst:63
+#: ../../source/index.rst:200
msgid ""
"The following documentation sections briefly introduce TensorCircuit to the "
"users and developpers."
msgstr "以下文档向用户和开发者简要介绍了 TensorCircuit 软件。"
-#: ../../source/index.rst:76
+#: ../../source/index.rst:213
msgid "Tutorials"
msgstr "教程"
-#: ../../source/index.rst:78
+#: ../../source/index.rst:215
msgid ""
"The following documentation sections include integrated examples in the form of "
"Jupyter Notebook."
msgstr ""
"以下 Jupyter Notebook 格式的文档包括了一系列使用 TensorCircuit 的集成案例。"
-#: ../../source/index.rst:92
+#: ../../source/index.rst:229
msgid "API References"
msgstr "API 参考"
-#: ../../source/index.rst:101
+#: ../../source/index.rst:238
msgid "Indices and Tables"
msgstr "索引和表格"
-#: ../../source/index.rst:103
+#: ../../source/index.rst:240
msgid ":ref:`genindex`"
msgstr ":ref:`genindex`"
-#: ../../source/index.rst:104
+#: ../../source/index.rst:241
msgid ":ref:`modindex`"
msgstr ":ref:`modindex`"
-#: ../../source/index.rst:105
+#: ../../source/index.rst:242
msgid ":ref:`search`"
msgstr ":ref:`search`"
@@ -213,3 +313,47 @@ msgstr ":ref:`search`"
#~ msgid "Links"
#~ msgstr "重要链接"
+
+#~ msgid "Source code: https://github.com/tencent-quantum-lab/tensorcircuit"
+#~ msgstr "源代码: https://github.com/tencent-quantum-lab/tensorcircuit"
+
+#~ msgid "Documentation: https://tensorcircuit.readthedocs.io"
+#~ msgstr "文档: https://tensorcircuit.readthedocs.io"
+
+#~ msgid ""
+#~ "Software Whitepaper (published in Quantum): https://quantum-journal.org/"
+#~ "papers/q-2023-02-02-912/"
+#~ msgstr ""
+#~ "软件白皮书 (发表于 Quantum): https://quantum-journal.org/papers/"
+#~ "q-2023-02-02-912/"
+
+#~ msgid ""
+#~ "Issue Tracker: https://github.com/tencent-quantum-lab/tensorcircuit/issues"
+#~ msgstr "问题跟踪: https://github.com/tencent-quantum-lab/tensorcircuit/issues"
+
+#~ msgid "Forum: https://github.com/tencent-quantum-lab/tensorcircuit/discussions"
+#~ msgstr ""
+#~ "论坛社区: https://github.com/tencent-quantum-lab/tensorcircuit/discussions"
+
+#~ msgid "PyPI page: https://pypi.org/project/tensorcircuit"
+#~ msgstr "PyPI 页面: https://pypi.org/project/tensorcircuit"
+
+#~ msgid ""
+#~ "DockerHub page: https://hub.docker.com/repository/docker/tensorcircuit/"
+#~ "tensorcircuit"
+#~ msgstr ""
+#~ "DockerHub 页面: https://hub.docker.com/repository/docker/tensorcircuit/"
+#~ "tensorcircuit"
+
+#~ msgid ""
+#~ "Research and projects based on TensorCircuit: https://github.com/tencent-"
+#~ "quantum-lab/tensorcircuit#research-and-applications"
+#~ msgstr ""
+#~ "基于 TensorCircuit 的研究和项目: https://github.com/tencent-quantum-lab/"
+#~ "tensorcircuit#research-and-applications"
+
+#~ msgid "Tencent Quantum Cloud Service: https://quantum.tencent.com/cloud/"
+#~ msgstr "腾讯量子云服务: https://quantum.tencent.com/cloud/"
+
+#~ msgid "Research based on TC"
+#~ msgstr "基于 TC 的研究项目"
diff --git a/docs/source/locale/zh/LC_MESSAGES/infras.po b/docs/source/locale/zh/LC_MESSAGES/infras.po
index a24e7660..b285ed77 100644
--- a/docs/source/locale/zh/LC_MESSAGES/infras.po
+++ b/docs/source/locale/zh/LC_MESSAGES/infras.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: tensorcircuit\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-05-07 10:47+0800\n"
+"POT-Creation-Date: 2023-05-27 18:52+0800\n"
"PO-Revision-Date: 2022-04-18 20:44+0800\n"
"Last-Translator: Xinghan Yang\n"
"Language: cn\n"
@@ -403,6 +403,62 @@ msgstr ""
"(``*``)、张量乘积(``|``)和偏迹(``.partial_trace(subsystems_to_trace_out)``)。要提取这些对象的矩阵信息,我们可以使用"
" ``.eval()`` 或 ``.eval_matrix() ``,前者保留了张量网络的形状信息,而后者给出了形状秩为2的矩阵表示。"
+#: ../../source/infras.rst:162
+msgid "Quantum Cloud SDK: Layerwise API design"
+msgstr ""
+
+#: ../../source/infras.rst:164
+msgid "From lower level to higher level, a view of API layers invoking QPU calls"
+msgstr ""
+
+#: ../../source/infras.rst:166
+msgid ""
+"Vendor specific implementation of functional API in, e.g., "
+":py:mod:`tensorcircuit.cloud.tencent`"
+msgstr ""
+
+#: ../../source/infras.rst:168
+msgid ""
+"Provider agnostic functional lower level API for task/device management "
+"in :py:mod:`tensorcircuit.cloud.apis`"
+msgstr ""
+
+#: ../../source/infras.rst:170
+msgid ""
+"Object oriented abstraction for Provider/Device/Task in "
+":py:mod:`tensorcircuit.cloud.abstraction`"
+msgstr ""
+
+#: ../../source/infras.rst:172
+msgid ""
+"Unified batch submission interface as standarized in "
+":py:meth:`tensorcircuit.cloud.wrapper.batch_submit_template`"
+msgstr ""
+
+#: ../../source/infras.rst:174
+msgid ""
+"Numerical and experimental unified all-in-one interface as "
+":py:meth:`tensorcircuit.cloud.wrapper.batch_expectation_ps`"
+msgstr ""
+
+#: ../../source/infras.rst:176
+msgid ""
+"Application level code with QPU calls built directly on "
+"``batch_expectation_ps`` or more fancy algorithms can be built on "
+"``batch_submit_func`` so that these algorithms can be reused as long as "
+"one function ``batch_submit_func`` is defined for a given vendor (cheaper"
+" than defining a new provider from lower level)."
+msgstr ""
+
+#: ../../source/infras.rst:181
+msgid ""
+"For compiler, error mitigation and results post-processing parts, they "
+"can be carefully designed to decouple with the QPU calls, so they are "
+"separately implemented in :py:mod:`tensorcircuit.compiler` and "
+":py:mod:`tensorcircuit.results`, and they can be independently useful "
+"even without tc's cloud access."
+msgstr ""
+
#~ msgid ""
#~ ":py:mod:`tensorcircuit.densitymatrix2`: Highly efficient"
#~ " implementation of "
diff --git a/docs/source/locale/zh/LC_MESSAGES/quickstart.po b/docs/source/locale/zh/LC_MESSAGES/quickstart.po
index 75a6f3e5..b4275455 100644
--- a/docs/source/locale/zh/LC_MESSAGES/quickstart.po
+++ b/docs/source/locale/zh/LC_MESSAGES/quickstart.po
@@ -6,18 +6,17 @@
#
msgid ""
msgstr ""
-"Project-Id-Version: tensorcircuit\n"
+"Project-Id-Version: tensorcircuit\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-05-07 10:47+0800\n"
+"POT-Creation-Date: 2023-07-14 15:43+0800\n"
"PO-Revision-Date: 2023-05-07 11:01+0800\n"
"Last-Translator: Xinghan Yang\n"
-"Language-Team: Xinghan Yang\n"
"Language: cn\n"
+"Language-Team: Xinghan Yang\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.9.1\n"
-"X-Generator: Poedit 3.2.2\n"
+"Generated-By: Babel 2.12.1\n"
#: ../../source/quickstart.rst:3
msgid "Quick Start"
@@ -28,22 +27,23 @@ msgid "Installation"
msgstr "安装"
#: ../../source/quickstart.rst:8
-msgid "For x86 Linux or Mac,"
-msgstr ""
+msgid "For x86 Linux,"
+msgstr "x64 Linux"
#: ../../source/quickstart.rst:10
msgid "``pip install tensorcircuit``"
-msgstr ""
+msgstr "``pip install tensorcircuit``"
#: ../../source/quickstart.rst:12
msgid ""
-"is in general enough. Either pip from conda or other python env managers is fine."
+"is in general enough. Either pip from conda or other python env managers "
+"is fine."
msgstr ""
#: ../../source/quickstart.rst:15
msgid ""
-"Since there are many optional packages for various features, the users may need "
-"to install more pip packages when required."
+"Since there are many optional packages for various features, the users "
+"may need to install more pip packages when required."
msgstr ""
#: ../../source/quickstart.rst:18
@@ -52,9 +52,10 @@ msgstr ""
#: ../../source/quickstart.rst:19
msgid ""
-"please refer to the GPU aware installation guide of corresponding machine "
-"learning frameworks: `TensorFlow `_, "
-"`Jax `_, or `PyTorch "
+"please refer to the GPU aware installation guide of corresponding machine"
+" learning frameworks: `TensorFlow "
+"`_, `Jax "
+"`_, or `PyTorch "
"`_."
msgstr ""
@@ -64,46 +65,48 @@ msgstr ""
#: ../../source/quickstart.rst:26
msgid ""
-"``sudo docker run -it --network host --gpus all tensorcircuit/tensorcircuit``."
+"``sudo docker run -it --network host --gpus all "
+"tensorcircuit/tensorcircuit``."
msgstr ""
#: ../../source/quickstart.rst:28
msgid ""
-"For more details on docker setup, please refer to `docker readme `_."
+"For more details on docker setup, please refer to `docker readme "
+"`_."
msgstr ""
#: ../../source/quickstart.rst:30
msgid ""
-"For Windows, due to the lack of support for Jax, we recommend to use docker or "
-"WSL, please refer to `TC via windows docker `_ or `TC via WSL `_."
+"For Windows, due to the lack of support for Jax, we recommend to use "
+"docker or WSL, please refer to `TC via windows docker "
+"`_ or `TC via WSL "
+"`_."
msgstr ""
#: ../../source/quickstart.rst:32
-msgid ""
-"For Mac with M series chips (arm architecture), please refer to `TC on Mac M "
-"series `_."
-msgstr ""
+msgid "For MacOS, please refer to `TC on Mac `_."
+msgstr "For MacOS, please refer to `在Mac上安装TC `_."
#: ../../source/quickstart.rst:34
msgid ""
-"Overall, the installation of TensorCircuit is simple, since it is purely in "
-"Python and hence very portable. As long as the users can take care of the "
-"installation of ML frameworks on the corresponding system, TensorCircuit will "
-"work as expected."
+"Overall, the installation of TensorCircuit is simple, since it is purely "
+"in Python and hence very portable. As long as the users can take care of "
+"the installation of ML frameworks on the corresponding system, "
+"TensorCircuit will work as expected."
msgstr ""
#: ../../source/quickstart.rst:37
msgid ""
-"To debug the installation issue or report bugs, please check the environment "
-"information by ``tc.about()``."
+"To debug the installation issue or report bugs, please check the "
+"environment information by ``tc.about()``."
msgstr ""
#: ../../source/quickstart.rst:40
msgid ""
-"We also provide a nightly build of tensorcircuit via PyPI which can be accessed "
-"by ``pip uninstall tensorcircuit``, then ``pip install tensorcircuit-nightly``"
+"We also provide a nightly build of tensorcircuit via PyPI which can be "
+"accessed by ``pip uninstall tensorcircuit``, then ``pip install "
+"tensorcircuit-nightly``"
msgstr ""
#: ../../source/quickstart.rst:46
@@ -124,20 +127,18 @@ msgstr "**输入状态:**"
#: ../../source/quickstart.rst:54
msgid ""
-"The default input function for the circuit is :math:`\\vert 0^n \\rangle`. One "
-"can change this to other wavefunctions by directly feeding the inputs state "
-"vectors w: ``c=tc.Circuit(n, inputs=w)``."
+"The default input function for the circuit is :math:`\\vert 0^n "
+"\\rangle`. One can change this to other wavefunctions by directly feeding"
+" the inputs state vectors w: ``c=tc.Circuit(n, inputs=w)``."
msgstr ""
-"电路的默认输入函数是 :math:`\\vert 0^n \\rangle` 。可以通过直接输入输入状态向量 "
-"w 将其更改为其他波函数: ``c=tc.Circuit(n, inputs=w)``。"
+"电路的默认输入函数是 :math:`\\vert 0^n \\rangle` 。可以通过直接输入输入状态向量 w 将其更改为其他波函数: "
+"``c=tc.Circuit(n, inputs=w)``。"
#: ../../source/quickstart.rst:56
msgid ""
-"One can also feed matrix product states as input states for the circuit, but we "
-"leave MPS/MPO usage for future sections."
-msgstr ""
-"也可以将矩阵乘积状态作为电路的输入状态,但我们将矩阵乘积状态/矩阵乘积算子的使用留"
-"待后续讲解。"
+"One can also feed matrix product states as input states for the circuit, "
+"but we leave MPS/MPO usage for future sections."
+msgstr "也可以将矩阵乘积状态作为电路的输入状态,但我们将矩阵乘积状态/矩阵乘积算子的使用留待后续讲解。"
#: ../../source/quickstart.rst:58
msgid "**Quantum Gates:**"
@@ -145,13 +146,12 @@ msgstr "**量子门:**"
#: ../../source/quickstart.rst:60
msgid ""
-"We can apply gates on circuit objects. For example, using ``c.H(1)`` or ``c."
-"rx(2, theta=0.2)``, we can apply Hadamard gate on qubit 1 (0-based) or apply Rx "
-"gate on qubit 2 as :math:`e^{-i\\theta/2 X}`."
+"We can apply gates on circuit objects. For example, using ``c.H(1)`` or "
+"``c.rx(2, theta=0.2)``, we can apply Hadamard gate on qubit 1 (0-based) "
+"or apply Rx gate on qubit 2 as :math:`e^{-i\\theta/2 X}`."
msgstr ""
-"我们可以将门应用于电路对象。 例如,使用 ``c.H(1)`` 或 ``c.rx(2, theta=0.2)``,我"
-"们可以将 Hadamard 门应用于量子比特1 (基于0)或将 Rx 门应用于量子比特2 :math:"
-"`e^{-i\\theta/2 X}`。"
+"我们可以将门应用于电路对象。 例如,使用 ``c.H(1)`` 或 ``c.rx(2, theta=0.2)``,我们可以将 Hadamard "
+"门应用于量子比特1 (基于0)或将 Rx 门应用于量子比特2 :math:`e^{-i\\theta/2 X}`。"
#: ../../source/quickstart.rst:62
msgid "The same rule also applies to multi-qubit gates, such as ``c.cnot(0, 1)``."
@@ -163,16 +163,16 @@ msgstr "这些量子门也是高度可定制的,下面是两个例子"
#: ../../source/quickstart.rst:66
msgid ""
-"``c.exp1(0, 1, unitary=m, theta=0.2)`` which is for the exponential gate :math:"
-"`e^{i\\theta m}` of any matrix m as long as :math:`m^2=1`."
+"``c.exp1(0, 1, unitary=m, theta=0.2)`` which is for the exponential gate "
+":math:`e^{i\\theta m}` of any matrix m as long as :math:`m^2=1`."
msgstr ""
-"``c.exp1(0, 1, unitary=m, theta=0.2)`` 用于任何矩阵 m 的指数门 :math:"
-"`e^{i\\theta m}`,只要 m 满足 :math:`m^2=1`。"
+"``c.exp1(0, 1, unitary=m, theta=0.2)`` 用于任何矩阵 m 的指数门 :math:`e^{i\\theta "
+"m}`,只要 m 满足 :math:`m^2=1`。"
#: ../../source/quickstart.rst:68
msgid ""
-"``c.any(0, 1, unitary=m)`` which is for applying the unitary gate m on the "
-"circuit."
+"``c.any(0, 1, unitary=m)`` which is for applying the unitary gate m on "
+"the circuit."
msgstr "``c.any(0, 1, unitary=m)`` 在电路上作用任意的幺正量子门。"
#: ../../source/quickstart.rst:70
@@ -185,36 +185,33 @@ msgstr "**测量与期望**"
#: ../../source/quickstart.rst:74
msgid ""
-"The most straightforward way to get the output from the circuit object is by "
-"getting the output wavefunction in vector form as ``c.state()``."
-msgstr ""
-"从电路对象中获取输出的最直接的方法是通过 ``c.state()`` 以向量形式获取输出波函数。"
+"The most straightforward way to get the output from the circuit object is"
+" by getting the output wavefunction in vector form as ``c.state()``."
+msgstr "从电路对象中获取输出的最直接的方法是通过 ``c.state()`` 以向量形式获取输出波函数。"
#: ../../source/quickstart.rst:76
msgid ""
-"For bitstring sampling, we have ``c.perfect_sampling()`` which returns the "
-"bitstring and the corresponding probability amplitude."
-msgstr ""
-"对于位串采样,我们有 ``c.perfect_sampling()``,它返回位串和相应的概率幅度。"
+"For bitstring sampling, we have ``c.perfect_sampling()`` which returns "
+"the bitstring and the corresponding probability amplitude."
+msgstr "对于位串采样,我们有 ``c.perfect_sampling()``,它返回位串和相应的概率幅度。"
#: ../../source/quickstart.rst:78
msgid ""
-"To measure part of the qubits, we can use ``c.measure(0, 1)``, if we want to "
-"know the corresponding probability of the measurement output, try ``c.measure(0, "
-"1, with_prob=True)``. The measure API is by default non-jittable, but we also "
-"have a jittable version as ``c.measure_jit(0, 1)``."
+"To measure part of the qubits, we can use ``c.measure(0, 1)``, if we want"
+" to know the corresponding probability of the measurement output, try "
+"``c.measure(0, 1, with_prob=True)``. The measure API is by default non-"
+"jittable, but we also have a jittable version as ``c.measure_jit(0, 1)``."
msgstr ""
-"要测量部分量子比特,我们可以使用 ``c.measure(0, 1)``,如果我们想知道测量的结果的"
-"对应概率,可以尝试 ``c.measure(0, 1, with_prob=True)``。 测量 API 在默认情况下是"
-"不可即时编译的 ,但我们也有一个可即时编译的版本,如 ``c.measure_jit(0, 1)``。"
+"要测量部分量子比特,我们可以使用 ``c.measure(0, 1)``,如果我们想知道测量的结果的对应概率,可以尝试 "
+"``c.measure(0, 1, with_prob=True)``。 测量 API 在默认情况下是不可即时编译的 "
+",但我们也有一个可即时编译的版本,如 ``c.measure_jit(0, 1)``。"
#: ../../source/quickstart.rst:80
msgid ""
-"The measurement and sampling utilize advanced algorithms based on tensornetwork "
-"and thus require no knowledge or space for the full wavefunction."
-msgstr ""
-"测量和采样使用了基于张量网络的高级算法,因此不需要任何相关知识或者空间来获取全波"
-"函数。"
+"The measurement and sampling utilize advanced algorithms based on "
+"tensornetwork and thus require no knowledge or space for the full "
+"wavefunction."
+msgstr "测量和采样使用了基于张量网络的高级算法,因此不需要任何相关知识或者空间来获取全波函数。"
#: ../../source/quickstart.rst:82
msgid "See the example below:"
@@ -222,25 +219,26 @@ msgstr "请看下面的例子:"
#: ../../source/quickstart.rst:100
msgid ""
-"To compute expectation values for local observables, we have ``c.expectation([tc."
-"gates.z(), [0]], [tc.gates.z(), [1]])`` for :math:`\\langle Z_0Z_1 \\rangle` or "
-"``c.expectation([tc.gates.x(), [0]])`` for :math:`\\langle X_0 \\rangle`."
+"To compute expectation values for local observables, we have "
+"``c.expectation([tc.gates.z(), [0]], [tc.gates.z(), [1]])`` for "
+":math:`\\langle Z_0Z_1 \\rangle` or ``c.expectation([tc.gates.x(), "
+"[0]])`` for :math:`\\langle X_0 \\rangle`."
msgstr ""
-"为了计算局部可观察量的期望值,我们有 ``c.expectation([tc.gates.z(), [0]], [tc."
-"gates.z(), [1]])`` 对应的期望为 :math:`\\langle Z_0Z_1 \\rangle` 时,或 ``c."
-"expectation([tc.gates.x(), [0]])`` 对应的期望为 :math:`\\langle X_0 \\rangle`时."
+"为了计算局部可观察量的期望值,我们有 ``c.expectation([tc.gates.z(), [0]], [tc.gates.z(), "
+"[1]])`` 对应的期望为 :math:`\\langle Z_0Z_1 \\rangle` 时,或 "
+"``c.expectation([tc.gates.x(), [0]])`` 对应的期望为 :math:`\\langle X_0 "
+"\\rangle`时."
#: ../../source/quickstart.rst:102
msgid ""
-"This expectation API is rather flexible, as one can measure an m on several "
-"qubits as ``c.expectation([m, [0, 1, 2]])``."
-msgstr ""
-"因为可以在几个量子比特上测量一个 m,这种计算期望值的 API 相当灵活:``c."
-"expectation([m, [0, 1, 2]])``。"
+"This expectation API is rather flexible, as one can measure an m on "
+"several qubits as ``c.expectation([m, [0, 1, 2]])``."
+msgstr "因为可以在几个量子比特上测量一个 m,这种计算期望值的 API 相当灵活:``c.expectation([m, [0, 1, 2]])``。"
#: ../../source/quickstart.rst:104
msgid ""
-"We can also extract the unitary matrix underlying the whole circuit as follows:"
+"We can also extract the unitary matrix underlying the whole circuit as "
+"follows:"
msgstr "我们还可以提取整个电路下面的幺正矩阵,如下所示:"
#: ../../source/quickstart.rst:117
@@ -251,22 +249,20 @@ msgstr "**电路可视化**"
msgid ""
"We currently support transform ``tc.Circuit`` from and to Qiskit "
"``QuantumCircuit`` object."
-msgstr ""
-"我们目前支持 ``tc.Circuit`` 与 Qiskit ``QuantumCircuit`` 对象之间的互相转换。"
+msgstr "我们目前支持 ``tc.Circuit`` 与 Qiskit ``QuantumCircuit`` 对象之间的互相转换。"
#: ../../source/quickstart.rst:121
msgid ""
-"Export to Qiskit (possible for further hardware experiment, compiling, and "
-"visualization): ``c.to_qiskit()``."
-msgstr ""
-"导出到 Qiskit(可能用于进一步的硬件实验、编译和可视化):``c.to_qiskit()``。"
+"Export to Qiskit (possible for further hardware experiment, compiling, "
+"and visualization): ``c.to_qiskit()``."
+msgstr "导出到 Qiskit(可能用于进一步的硬件实验、编译和可视化):``c.to_qiskit()``。"
#: ../../source/quickstart.rst:123
msgid ""
"Import from Qiskit: ``c = tc.Circuit.from_qiskit(QuantumCircuit, n)``. "
-"Parameterized Qiskit circuit is supported by passing the parameters to the "
-"``binding_parameters`` argument of the ``from_qiskit`` function, similar to the "
-"``assign_parameters`` function in Qiskit."
+"Parameterized Qiskit circuit is supported by passing the parameters to "
+"the ``binding_parameters`` argument of the ``from_qiskit`` function, "
+"similar to the ``assign_parameters`` function in Qiskit."
msgstr ""
#: ../../source/quickstart.rst:127
@@ -275,40 +271,41 @@ msgstr "**电路可视化**"
#: ../../source/quickstart.rst:129
msgid ""
-"``c.vis_tex()`` can generate tex code for circuit visualization based on LaTeX "
-"`quantikz `__ package."
+"``c.vis_tex()`` can generate tex code for circuit visualization based on "
+"LaTeX `quantikz `__ package."
msgstr ""
"``c.vis_tex()`` 可以基于 `quantikz `__ "
"package 生成用于电路可视化的 tex 代码。"
#: ../../source/quickstart.rst:131
msgid ""
-"There are also some automatic pipeline helper functions to directly generate "
-"figures from tex code, but they require extra installations in the environment."
-msgstr ""
-"还有一些自动辅助函数可以直接从 tex 代码生成图形,但它们需要在环境中进行额外安装。"
+"There are also some automatic pipeline helper functions to directly "
+"generate figures from tex code, but they require extra installations in "
+"the environment."
+msgstr "还有一些自动辅助函数可以直接从 tex 代码生成图形,但它们需要在环境中进行额外安装。"
#: ../../source/quickstart.rst:133
msgid ""
-"``render_pdf(tex)`` function requires full installation of LaTeX locally. And in "
-"the Jupyter environment, we may prefer ``render_pdf(tex, notebook=True)`` to "
-"return jpg figures, which further require wand magicwand library installed, see "
-"`here `__."
+"``render_pdf(tex)`` function requires full installation of LaTeX locally."
+" And in the Jupyter environment, we may prefer ``render_pdf(tex, "
+"notebook=True)`` to return jpg figures, which further require wand "
+"magicwand library installed, see `here `__."
msgstr ""
-"``render_pdf(tex)`` 函数需要在本地完全安装 LaTeX。 在 Jupyter 环境中,我们可能会"
-"偏好 ``render_pdf(tex, notebook=True)`` 来返回 jpg 图形,这需要安装 wand "
-"magicwand 库,请参阅 `这里 `__ 。"
+"``render_pdf(tex)`` 函数需要在本地完全安装 LaTeX。 在 Jupyter 环境中,我们可能会偏好 "
+"``render_pdf(tex, notebook=True)`` 来返回 jpg 图形,这需要安装 wand magicwand 库,请参阅 "
+"`这里 `__ 。"
#: ../../source/quickstart.rst:135
msgid ""
-"Or since we can transform ``tc.Circuit`` into QuantumCircuit easily, we have a "
-"simple pipeline to first transform ``tc.Circuit`` into Qiskit and then call the "
-"visualization built in Qiskit. Namely, we have ``c.draw()`` API."
+"Or since we can transform ``tc.Circuit`` into QuantumCircuit easily, we "
+"have a simple pipeline to first transform ``tc.Circuit`` into Qiskit and "
+"then call the visualization built in Qiskit. Namely, we have ``c.draw()``"
+" API."
msgstr ""
-"从 Qiskit 导入:``c = tc.Circuit.from_qiskit(QuantumCircuit, n)`` 或者因为我们可"
-"以轻松地将 ``tc.Circuit`` 转换为 QuantumCircuit,我们有一个简单的管道来首先转换 "
-"``tc.Circuit`` 为 Qiskit,然后调用 Qiskit 中内置的可视化。 也就是说,我们有 ``c."
-"draw()`` API。"
+"从 Qiskit 导入:``c = tc.Circuit.from_qiskit(QuantumCircuit, n)`` "
+"或者因为我们可以轻松地将 ``tc.Circuit`` 转换为 QuantumCircuit,我们有一个简单的管道来首先转换 "
+"``tc.Circuit`` 为 Qiskit,然后调用 Qiskit 中内置的可视化。 也就是说,我们有 ``c.draw()`` API。"
#: ../../source/quickstart.rst:137
msgid "**Circuit Intermediate Representation:**"
@@ -316,21 +313,19 @@ msgstr "**电路中间表示:**"
#: ../../source/quickstart.rst:139
msgid ""
-"TensorCircuit provides its own circuit IR as a python list of dicts. This IR can "
-"be further utilized to run compiling, generate serialization qasm, or render "
-"circuit figures."
-msgstr ""
-"TensorCircuit 提供自己的中间表示是元素是字典的列表。此中间表示可进一步用于运行编"
-"译、生成序列化 qasm 或渲染电路图。"
+"TensorCircuit provides its own circuit IR as a python list of dicts. This"
+" IR can be further utilized to run compiling, generate serialization "
+"qasm, or render circuit figures."
+msgstr "TensorCircuit 提供自己的中间表示是元素是字典的列表。此中间表示可进一步用于运行编译、生成序列化 qasm 或渲染电路图。"
#: ../../source/quickstart.rst:141
msgid ""
-"The IR is given as a list, each element is a dict containing information on one "
-"gate that is applied to the circuit. Note gate attr in the dict is a python "
-"function that returns the gate's node."
+"The IR is given as a list, each element is a dict containing information "
+"on one gate that is applied to the circuit. Note gate attr in the dict is"
+" a python function that returns the gate's node."
msgstr ""
-"中间表示以列表形式给出,每个元素都是一个字典,其中包含应用于电路的一个量子门的信"
-"息。 注意字典中的 gate atrr 实际上是一个返回此量子门的节点的 python 函数。"
+"中间表示以列表形式给出,每个元素都是一个字典,其中包含应用于电路的一个量子门的信息。 注意字典中的 gate atrr "
+"实际上是一个返回此量子门的节点的 python 函数。"
#: ../../source/quickstart.rst:153
msgid "Programming Paradigm"
@@ -338,38 +333,36 @@ msgstr "编程范式"
#: ../../source/quickstart.rst:155
msgid ""
-"The most common case and the most typical programming paradigm for TensorCircuit "
-"are to evaluate the circuit output and the corresponding quantum gradients, "
-"which is common in variational quantum algorithms."
-msgstr ""
-"TensorCircuit 最常见的情况和最典型的编程范式是评估电路的输出以及相应的量子梯度,"
-"这在变分量子算法中很常见。"
+"The most common case and the most typical programming paradigm for "
+"TensorCircuit are to evaluate the circuit output and the corresponding "
+"quantum gradients, which is common in variational quantum algorithms."
+msgstr "TensorCircuit 最常见的情况和最典型的编程范式是评估电路的输出以及相应的量子梯度,这在变分量子算法中很常见。"
#: ../../source/quickstart.rst:182
#, fuzzy
msgid ""
-"Also for a non-quantum example (linear regression) demonstrating the backend "
-"agnostic feature, variables with pytree support, AD/jit/vmap usage, and "
-"variational optimization loops. Please refer to the example script: `linear "
-"regression example `_. This example might be more friendly to the "
-"machine learning community since it is purely classical while also showcasing "
-"the main features and paradigms of tensorcircuit."
+"Also for a non-quantum example (linear regression) demonstrating the "
+"backend agnostic feature, variables with pytree support, AD/jit/vmap "
+"usage, and variational optimization loops. Please refer to the example "
+"script: `linear regression example `_. This example "
+"might be more friendly to the machine learning community since it is "
+"purely classical while also showcasing the main features and paradigms of"
+" tensorcircuit."
msgstr ""
-"同样对于演示后端不可知特性的非量子示例(线性回归),pytree 支持变量、自动微分/即"
-"时编译/矢量并行化 用法和变分优化循环。请参考示例脚本: `线性回归示例 `_ 。 这"
-"个例子可能对机器学习的用户更友好,因为它纯粹是经典的,同时也展示了 TensorCircuit "
-"的主要特征和范式。"
+"同样对于演示后端不可知特性的非量子示例(线性回归),pytree 支持变量、自动微分/即时编译/矢量并行化 用法和变分优化循环。请参考示例脚本: "
+"`线性回归示例 `_ 。 "
+"这个例子可能对机器学习的用户更友好,因为它纯粹是经典的,同时也展示了 TensorCircuit 的主要特征和范式。"
#: ../../source/quickstart.rst:185
msgid ""
-"If the user has no intention to maintain the application code in a backend "
-"agnostic fashion, the API for ML frameworks can be more handily used and "
-"interleaved with the TensorCircuit API."
+"If the user has no intention to maintain the application code in a "
+"backend agnostic fashion, the API for ML frameworks can be more handily "
+"used and interleaved with the TensorCircuit API."
msgstr ""
-"如果用户无意以与后端无关的方式维护应用程序代码,则可以更方便地使用用于机器学习框"
-"架的 API 并将其与 TensorCircuit API 交替使用。"
+"如果用户无意以与后端无关的方式维护应用程序代码,则可以更方便地使用用于机器学习框架的 API 并将其与 TensorCircuit API "
+"交替使用。"
#: ../../source/quickstart.rst:220
msgid "Automatic Differentiation, JIT, and Vectorized Parallelism"
@@ -377,22 +370,21 @@ msgstr "自动微分、即时编译和矢量化并行 "
#: ../../source/quickstart.rst:222
msgid ""
-"For concepts of AD, JIT and VMAP, please refer to `Jax documentation `__ ."
+"For concepts of AD, JIT and VMAP, please refer to `Jax documentation "
+"`__ ."
msgstr ""
-"关于自动微分、即时编译和向量并行化,请参考 `Jax 文档 `__ 。"
+"关于自动微分、即时编译和向量并行化,请参考 `Jax 文档 "
+"`__ 。"
#: ../../source/quickstart.rst:224
msgid ""
"The related API design in TensorCircuit closely follows the functional "
-"programming design pattern in Jax with some slight differences. So we strongly "
-"recommend users learn some basics about Jax no matter which ML backend they "
-"intend to use."
+"programming design pattern in Jax with some slight differences. So we "
+"strongly recommend users learn some basics about Jax no matter which ML "
+"backend they intend to use."
msgstr ""
-"TensorCircuit 中的相关 API 设计与 Jax 中的函数式编程的设计模式密切相关,但是略有"
-"不同。因此,我们强烈建议用户学习一些有关 Jax 的基础知识,无论他们打算使用哪种机器"
-"学习后端。"
+"TensorCircuit 中的相关 API 设计与 Jax 中的函数式编程的设计模式密切相关,但是略有不同。因此,我们强烈建议用户学习一些有关 "
+"Jax 的基础知识,无论他们打算使用哪种机器学习后端。"
#: ../../source/quickstart.rst:226
msgid "**AD Support:**"
@@ -400,11 +392,9 @@ msgstr "**自动微分支持**"
#: ../../source/quickstart.rst:228
msgid ""
-"Gradients, vjps, jvps, natural gradients, Jacobians, and Hessians. AD is the "
-"base for all modern machine learning libraries."
-msgstr ""
-"梯度、矢量雅可比乘积、自然梯度、 Jacobian 矩阵和 Hessian 矩阵。自动微分是所有现代"
-"机器学习库的基础。"
+"Gradients, vjps, jvps, natural gradients, Jacobians, and Hessians. AD is "
+"the base for all modern machine learning libraries."
+msgstr "梯度、矢量雅可比乘积、自然梯度、 Jacobian 矩阵和 Hessian 矩阵。自动微分是所有现代机器学习库的基础。"
#: ../../source/quickstart.rst:232
msgid "**JIT Support:**"
@@ -412,19 +402,18 @@ msgstr "**自动微分支持**"
#: ../../source/quickstart.rst:234
msgid ""
-"Parameterized quantum circuits can run in a blink. Always use jit if the circuit "
-"will get evaluations multiple times, it can greatly boost the simulation with "
-"two or three order time reduction. But also be cautious, users need to be "
-"familiar with jit, otherwise, the jitted function may return unexpected results "
-"or recompile on every hit (wasting lots of time). To learn more about the jit "
-"mechanism, one can refer to documentation or blogs on ``tf.function`` or ``jax."
-"jit``, though these two still have subtle differences."
+"Parameterized quantum circuits can run in a blink. Always use jit if the "
+"circuit will get evaluations multiple times, it can greatly boost the "
+"simulation with two or three order time reduction. But also be cautious, "
+"users need to be familiar with jit, otherwise, the jitted function may "
+"return unexpected results or recompile on every hit (wasting lots of "
+"time). To learn more about the jit mechanism, one can refer to "
+"documentation or blogs on ``tf.function`` or ``jax.jit``, though these "
+"two still have subtle differences."
msgstr ""
-"参数化的量子电路可以在瞬间完成运行。如果电路将得到多次运行,请始终使用即时编译,"
-"它可以大大提高仿真速度,减少两到三个数量级的运行时间。但也要小心,用户需要熟悉 即"
-"时编译,否则,即时编译的函数可能会返回意外结果或每次在点击时都重新编译(浪费大量"
-"时间)。要了解更多关于即时编译机制的信息,可以参考关于 ``tf.function`` 或 ``jax."
-"jit`` 的文档或博客,即使这两者仍然存在细微差别。"
+"参数化的量子电路可以在瞬间完成运行。如果电路将得到多次运行,请始终使用即时编译,它可以大大提高仿真速度,减少两到三个数量级的运行时间。但也要小心,用户需要熟悉"
+" 即时编译,否则,即时编译的函数可能会返回意外结果或每次在点击时都重新编译(浪费大量时间)。要了解更多关于即时编译机制的信息,可以参考关于 "
+"``tf.function`` 或 ``jax.jit`` 的文档或博客,即使这两者仍然存在细微差别。"
#: ../../source/quickstart.rst:238
msgid "**VMAP Support:**"
@@ -432,12 +421,13 @@ msgstr "**自动微分支持**"
#: ../../source/quickstart.rst:240
msgid ""
-"Inputs, parameters, measurements, circuit structures, and Monte Carlo noise can "
-"all be evaluated in parallel. To learn more about vmap mechanism, one can refer "
-"to documentation or blogs on ``tf.vectorized_map`` or ``jax.vmap``."
+"Inputs, parameters, measurements, circuit structures, and Monte Carlo "
+"noise can all be evaluated in parallel. To learn more about vmap "
+"mechanism, one can refer to documentation or blogs on "
+"``tf.vectorized_map`` or ``jax.vmap``."
msgstr ""
-"输入、参数、测量、电路结构、蒙特卡洛噪声都可以并行测算。 要了解有关矢量并行化机制"
-"的更多信息,可以参考 ``tf.vectorized_map`` 或 ``jax.vmap`` 上的文档或博客。"
+"输入、参数、测量、电路结构、蒙特卡洛噪声都可以并行测算。 要了解有关矢量并行化机制的更多信息,可以参考 ``tf.vectorized_map``"
+" 或 ``jax.vmap`` 上的文档或博客。"
#: ../../source/quickstart.rst:245
msgid "Backend Agnosticism"
@@ -445,34 +435,36 @@ msgstr "后端无关特性"
#: ../../source/quickstart.rst:247
msgid ""
-"TensorCircuit supports TensorFlow, Jax, and PyTorch backends. We recommend using "
-"TensorFlow or Jax backend since PyTorch lacks advanced jit and vmap features."
+"TensorCircuit supports TensorFlow, Jax, and PyTorch backends. We "
+"recommend using TensorFlow or Jax backend since PyTorch lacks advanced "
+"jit and vmap features."
msgstr ""
-"TensorCircuit 支持 TensorFlow、Jax 和 PyTorch 后端。 我们建议使用 TensorFlow 或 "
-"Jax 后端,因为 PyTorch 缺乏高级 jit 和 vmap 功能。"
+"TensorCircuit 支持 TensorFlow、Jax 和 PyTorch 后端。 我们建议使用 TensorFlow 或 Jax "
+"后端,因为 PyTorch 缺乏高级 jit 和 vmap 功能。"
#: ../../source/quickstart.rst:249
msgid ""
-"The backend can be set as ``K=tc.set_backend(\"jax\")`` and ``K`` is the backend "
-"with a full set of APIs as a conventional ML framework, which can also be "
-"accessed by ``tc.backend``."
+"The backend can be set as ``K=tc.set_backend(\"jax\")`` and ``K`` is the "
+"backend with a full set of APIs as a conventional ML framework, which can"
+" also be accessed by ``tc.backend``."
msgstr ""
-"后端可以设置为 ``K=tc.set_backend(\"jax\")`` ,``K``作为常规机器学习框架的全套API"
-"的后端,也可以通过``tc .backend`` 被访问。"
+"后端可以设置为 ``K=tc.set_backend(\"jax\")`` ,``K``作为常规机器学习框架的全套API的后端,也可以通过``tc"
+" .backend`` 被访问。"
#: ../../source/quickstart.rst:272
#, fuzzy
msgid ""
-"The supported APIs in the backend come from two sources, one part is implemented "
-"in `TensorNetwork package `__ and the other part is implemented "
-"in `TensorCircuit package `__. To "
-"see all the backend agnostic APIs, try:"
+"The supported APIs in the backend come from two sources, one part is "
+"implemented in `TensorNetwork package "
+"`__"
+" and the other part is implemented in `TensorCircuit package "
+"`__. To see all the backend "
+"agnostic APIs, try:"
msgstr ""
-"在后端支持的 APIs 有两个来源 , 一个来自 `TensorNetwork `__ "
-"另一个来自 `TensorCircuit package `__。"
+"在后端支持的 APIs 有两个来源 , 一个来自 `TensorNetwork "
+"`__"
+" 另一个来自 `TensorCircuit package `__。"
#: ../../source/quickstart.rst:427
msgid ""
@@ -484,20 +476,19 @@ msgstr "转换 dtype"
#: ../../source/quickstart.rst:432
msgid ""
-"TensorCircuit supports simulation using 32/64 bit precession. The default dtype "
-"is 32-bit as \"complex64\". Change this by ``tc.set_dtype(\"complex128\")``."
+"TensorCircuit supports simulation using 32/64 bit precession. The default"
+" dtype is 32-bit as \"complex64\". Change this by "
+"``tc.set_dtype(\"complex128\")``."
msgstr ""
"TensorCircuit 支持使用 32/64 bit 精确度的模拟。默认的 dtype 是 32-bit 的 "
-"\"complex64\"。可以通过 ``tc.set_dtype(\"complex128\")`` 把 dtype 改为 "
-"\"complex 128\" 。"
+"\"complex64\"。可以通过 ``tc.set_dtype(\"complex128\")`` 把 dtype 改为 \"complex"
+" 128\" 。"
#: ../../source/quickstart.rst:435
msgid ""
-"``tc.dtypestr`` always returns the current dtype string: either \"complex64\" or "
-"\"complex128\"."
-msgstr ""
-"``tc.dtypestr`` 总会返回当前的 dtype 字符串: 不是 \"complex64\" 就是 "
-"\"complex128\"."
+"``tc.dtypestr`` always returns the current dtype string: either "
+"\"complex64\" or \"complex128\"."
+msgstr "``tc.dtypestr`` 总会返回当前的 dtype 字符串: 不是 \"complex64\" 就是 \"complex128\"."
#: ../../source/quickstart.rst:439
msgid "Setup the Contractor"
@@ -505,29 +496,29 @@ msgstr "设置 contractor"
#: ../../source/quickstart.rst:441
msgid ""
-"TensorCircuit is a tensornetwork contraction-based quantum circuit simulator. A "
-"contractor is for searching for the optimal contraction path of the circuit "
-"tensornetwork."
-msgstr ""
-"TensorCircuit 是一个基于张量网络收缩的量子电路模拟器。 contractor 用于搜索电路张"
-"量网络的最佳收缩路径。"
+"TensorCircuit is a tensornetwork contraction-based quantum circuit "
+"simulator. A contractor is for searching for the optimal contraction path"
+" of the circuit tensornetwork."
+msgstr "TensorCircuit 是一个基于张量网络收缩的量子电路模拟器。 contractor 用于搜索电路张量网络的最佳收缩路径。"
#: ../../source/quickstart.rst:443
msgid ""
-"There are various advanced contractors provided by third-party packages, such as "
-"`opt-einsum `__ and `cotengra `__."
+"There are various advanced contractors provided by third-party packages, "
+"such as `opt-einsum `__ and "
+"`cotengra `__."
msgstr ""
-"有各种第三方包提供的高级 contractor ,例如 `opt-einsum `__ 和 `cotengra `__ 。"
+"有各种第三方包提供的高级 contractor ,例如 `opt-einsum "
+"`__ 和 `cotengra "
+"`__ 。"
#: ../../source/quickstart.rst:445
msgid ""
-"`opt-einsum` is shipped with TensorNetwork package. To use cotengra, one needs "
-"to pip install it; kahypar is also recommended to install with cotengra."
+"`opt-einsum` is shipped with TensorNetwork package. To use cotengra, one "
+"needs to pip install it; kahypar is also recommended to install with "
+"cotengra."
msgstr ""
-"`opt-einsum` 随 TensorNetwork 软件包一起。如要使用 cotengra,则需要 pip 安装它; "
-"还建议安装 cotengra 随 kahypar 一起使用。"
+"`opt-einsum` 随 TensorNetwork 软件包一起。如要使用 cotengra,则需要 pip 安装它; 还建议安装 "
+"cotengra 随 kahypar 一起使用。"
#: ../../source/quickstart.rst:447
msgid "Some setup cases:"
@@ -536,15 +527,17 @@ msgstr "一些设置案例:"
#: ../../source/quickstart.rst:473
#, fuzzy
msgid ""
-"For advanced configurations on cotengra contractors, please refer to cotengra "
-"`doc `__ and more fancy "
-"examples can be found at `contractor tutorial `__."
+"For advanced configurations on cotengra contractors, please refer to "
+"cotengra `doc "
+"`__ and more "
+"fancy examples can be found at `contractor tutorial `__."
msgstr ""
-"有关 cotengra contractor 的高级配置,请参阅 cotengra `doc `__ 更多精彩示例在 `contractor 教程 "
-"`__."
+"有关 cotengra contractor 的高级配置,请参阅 cotengra `doc "
+"`__ 更多精彩示例在 "
+"`contractor 教程 `__."
#: ../../source/quickstart.rst:475
msgid "**Setup in Function or Context Level**"
@@ -552,11 +545,9 @@ msgstr "**函数和上下文级别的设置**"
#: ../../source/quickstart.rst:477
msgid ""
-"Beside global level setup, we can also setup the backend, the dtype, and the "
-"contractor at the function level or context manager level:"
-msgstr ""
-"除了全局级别设置,我们还可以在函数级别或上下文管理器级别设置后端、dtype 和"
-"contractor:"
+"Beside global level setup, we can also setup the backend, the dtype, and "
+"the contractor at the function level or context manager level:"
+msgstr "除了全局级别设置,我们还可以在函数级别或上下文管理器级别设置后端、dtype 和contractor:"
#: ../../source/quickstart.rst:495
msgid "Noisy Circuit Simulation"
@@ -568,12 +559,12 @@ msgstr "**蒙特卡洛态模拟器**"
#: ../../source/quickstart.rst:499
msgid ""
-"For the Monte Carlo trajectory noise simulator, the unitary Kraus channel can be "
-"handled easily. TensorCircuit also supports fully jittable and differentiable "
-"general Kraus channel Monte Carlo simulation, though."
+"For the Monte Carlo trajectory noise simulator, the unitary Kraus channel"
+" can be handled easily. TensorCircuit also supports fully jittable and "
+"differentiable general Kraus channel Monte Carlo simulation, though."
msgstr ""
-"对于蒙特卡洛轨迹噪声模拟器,可以轻松处理幺正的 Kraus 通道。 不过,TensorCircuit "
-"还支持完全可即时编译和可微分的通用 Kraus 通道蒙特卡罗模拟。"
+"对于蒙特卡洛轨迹噪声模拟器,可以轻松处理幺正的 Kraus 通道。 不过,TensorCircuit 还支持完全可即时编译和可微分的通用 "
+"Kraus 通道蒙特卡罗模拟。"
#: ../../source/quickstart.rst:526
msgid "**Density Matrix Simulator:**"
@@ -581,12 +572,10 @@ msgstr "**密度矩阵模拟器**"
#: ../../source/quickstart.rst:528
msgid ""
-"Density matrix simulator ``tc.DMCircuit`` simulates the noise in a full form, "
-"but takes twice qubits to do noiseless simulation. The API is the same as ``tc."
-"Circuit``."
-msgstr ""
-"密度矩阵模拟器``tc.DMCircuit`` 以完整形式模拟噪声,但需要两倍的量子比特。API 与 "
-"``tc.Circuit`` 基本相同。"
+"Density matrix simulator ``tc.DMCircuit`` simulates the noise in a full "
+"form, but takes twice qubits to do noiseless simulation. The API is the "
+"same as ``tc.Circuit``."
+msgstr "密度矩阵模拟器``tc.DMCircuit`` 以完整形式模拟噪声,但需要两倍的量子比特。API 与 ``tc.Circuit`` 基本相同。"
#: ../../source/quickstart.rst:547
msgid "**Experiment with quantum errors:**"
@@ -602,8 +591,8 @@ msgstr ""
#: ../../source/quickstart.rst:567
msgid ""
-"Readout error can be added in experiments for sampling and expectation value "
-"calculation."
+"Readout error can be added in experiments for sampling and expectation "
+"value calculation."
msgstr ""
#: ../../source/quickstart.rst:593
@@ -612,27 +601,27 @@ msgstr "矩阵乘积状态和矩阵乘积算子"
#: ../../source/quickstart.rst:595
msgid ""
-"TensorCircuit has its class for MPS and MPO originally defined in TensorNetwork "
-"as ``tc.QuVector``, ``tc.QuOperator``."
+"TensorCircuit has its class for MPS and MPO originally defined in "
+"TensorNetwork as ``tc.QuVector``, ``tc.QuOperator``."
msgstr ""
-"TensorCircuit 有自己的 MPS 和 MPO 类,起初在 TensorNetwork 中定义为“tc.QuVector” "
-"和 “tc.QuOperator”。"
+"TensorCircuit 有自己的 MPS 和 MPO 类,起初在 TensorNetwork 中定义为“tc.QuVector” 和 "
+"“tc.QuOperator”。"
#: ../../source/quickstart.rst:597
msgid ""
-"``tc.QuVector`` can be extracted from ``tc.Circuit`` as the tensor network form "
-"for the output state (uncontracted) by ``c.quvector()``."
+"``tc.QuVector`` can be extracted from ``tc.Circuit`` as the tensor "
+"network form for the output state (uncontracted) by ``c.quvector()``."
msgstr ""
-"作为``c.quvector()`` 的输出状态(未收缩)的张量网络形式,``tc.QuVector`` 可以从"
-"``tc.Circuit`` 中提取。"
+"作为``c.quvector()`` 的输出状态(未收缩)的张量网络形式,``tc.QuVector`` 可以从``tc.Circuit`` "
+"中提取。"
#: ../../source/quickstart.rst:599
msgid ""
-"The QuVector forms a wavefunction w, which can also be fed into Circuit as the "
-"inputs state as ``c=tc.Circuit(n, mps_inputs=w)``."
+"The QuVector forms a wavefunction w, which can also be fed into Circuit "
+"as the inputs state as ``c=tc.Circuit(n, mps_inputs=w)``."
msgstr ""
-"QuVector 形成一个波函数 w,它也可以作为 ``c=tc.Circuit(n, mps_inputs=w)`` 的输入"
-"状态输入到 Circuit 中。"
+"QuVector 形成一个波函数 w,它也可以作为 ``c=tc.Circuit(n, mps_inputs=w)`` 的输入状态输入到 "
+"Circuit 中。"
#: ../../source/quickstart.rst:601
msgid "MPS as input state for circuit"
@@ -640,8 +629,8 @@ msgstr "MPS 作为电路的输入状态"
#: ../../source/quickstart.rst:603
msgid ""
-"The MPS/QuVector representation of the input state has a more efficient and "
-"compact form."
+"The MPS/QuVector representation of the input state has a more efficient "
+"and compact form."
msgstr "输入状态的 MPS/QuVector 表示具有更高效和紧凑的形式。"
#: ../../source/quickstart.rst:615
@@ -660,12 +649,14 @@ msgstr "MPO 作为电路上的门"
#: ../../source/quickstart.rst:636
msgid ""
-"Instead of a common quantum gate in matrix/node format, we can directly apply a "
-"gate in MPO/QuOperator format."
+"Instead of a common quantum gate in matrix/node format, we can directly "
+"apply a gate in MPO/QuOperator format."
msgstr "代替矩阵/节点格式的普通量子门,我们可以直接应用 MPO/QuOperator 格式的门。"
#: ../../source/quickstart.rst:647
-msgid "The representative gate defined in MPO format is the ``multicontrol`` gate."
+msgid ""
+"The representative gate defined in MPO format is the ``multicontrol`` "
+"gate."
msgstr "以 MPO 格式定义的代表门是 ``multicontrol`` 门。"
#: ../../source/quickstart.rst:649
@@ -674,8 +665,8 @@ msgstr "MPO作为电路期望估测算子"
#: ../../source/quickstart.rst:651
msgid ""
-"We can also measure operator expectation on the circuit output state where the "
-"operator is in MPO/QuOperator format."
+"We can also measure operator expectation on the circuit output state "
+"where the operator is in MPO/QuOperator format."
msgstr "我们还可以测量运算符对 MPO/QuOperator 格式的电路输出状态的期望。"
#: ../../source/quickstart.rst:663
@@ -688,46 +679,47 @@ msgstr "**与 PyTorch 模块混合的 PyTorch 接口:**"
#: ../../source/quickstart.rst:667
msgid ""
-"As we have mentioned in the backend section, the PyTorch backend may lack "
-"advanced features. This doesn't mean we cannot hybrid the advanced circuit "
-"module with PyTorch neural module. We can run the quantum function on TensorFlow "
-"or Jax backend while wrapping it with a Torch interface."
+"As we have mentioned in the backend section, the PyTorch backend may lack"
+" advanced features. This doesn't mean we cannot hybrid the advanced "
+"circuit module with PyTorch neural module. We can run the quantum "
+"function on TensorFlow or Jax backend while wrapping it with a Torch "
+"interface."
msgstr ""
-"正如我们在后端部分提到的,PyTorch 后端可能缺少高级功能。 这并不意味着我们不能将高"
-"级量子电路模块与 PyTorch 神经模块混合。 我们可以在 TensorFlow 或 Jax 后端运行量子"
-"函数,同时使用 Torch 接口包装它。 "
+"正如我们在后端部分提到的,PyTorch 后端可能缺少高级功能。 这并不意味着我们不能将高级量子电路模块与 PyTorch 神经模块混合。 "
+"我们可以在 TensorFlow 或 Jax 后端运行量子函数,同时使用 Torch 接口包装它。 "
#: ../../source/quickstart.rst:694
msgid ""
-"For a GPU/CPU, torch/tensorflow, quantum/classical hybrid machine learning "
-"pipeline enabled by tensorcircuit, see `example script `__."
+"For a GPU/CPU, torch/tensorflow, quantum/classical hybrid machine "
+"learning pipeline enabled by tensorcircuit, see `example script "
+"`__."
msgstr ""
#: ../../source/quickstart.rst:696
msgid ""
-"There is also a more flexible torch interface that support static non-tensor "
-"inputs as keyword arguments, which can be utilized as below:"
+"There is also a more flexible torch interface that support static non-"
+"tensor inputs as keyword arguments, which can be utilized as below:"
msgstr ""
#: ../../source/quickstart.rst:710
msgid ""
-"We also provider wrapper of quantum function for torch module as :py:meth:"
-"`tensorcircuit.TorchLayer` alias to :py:meth:`tensorcircuit.torchnn.QuantumNet`."
+"We also provider wrapper of quantum function for torch module as "
+":py:meth:`tensorcircuit.TorchLayer` alias to "
+":py:meth:`tensorcircuit.torchnn.QuantumNet`."
msgstr ""
#: ../../source/quickstart.rst:712
msgid ""
-"For ``TorchLayer``, ``use_interface=True`` is by default, which natively allow "
-"the quantum function defined on other tensorcircuit backends, such as jax or tf "
-"for speed consideration."
+"For ``TorchLayer``, ``use_interface=True`` is by default, which natively "
+"allow the quantum function defined on other tensorcircuit backends, such "
+"as jax or tf for speed consideration."
msgstr ""
#: ../../source/quickstart.rst:714
msgid ""
-"``TorchLayer`` can process multiple input arguments as multiple function inputs, "
-"following torch practice."
+"``TorchLayer`` can process multiple input arguments as multiple function "
+"inputs, following torch practice."
msgstr ""
#: ../../source/quickstart.rst:742
@@ -736,28 +728,29 @@ msgstr ""
#: ../../source/quickstart.rst:744
msgid ""
-"Similar rules apply similar as torch interface. The interface can even be used "
-"within jit environment outside. See :py:meth:`tensorcircuit.interfaces."
-"tensorflow.tensorflow_interface`."
+"Similar rules apply similar as torch interface. The interface can even be"
+" used within jit environment outside. See "
+":py:meth:`tensorcircuit.interfaces.tensorflow.tensorflow_interface`."
msgstr ""
#: ../../source/quickstart.rst:747
msgid ""
-"We also provider ``enable_dlpack=True`` option in torch and tf interfaces, which "
-"allow the tensor transformation happen without memory transfer via dlpack, "
-"higher version of tf or torch package required."
+"We also provider ``enable_dlpack=True`` option in torch and tf "
+"interfaces, which allow the tensor transformation happen without memory "
+"transfer via dlpack, higher version of tf or torch package required."
msgstr ""
#: ../../source/quickstart.rst:750
msgid ""
-"We also provider wrapper of quantum function for keras layer as :py:meth:"
-"`tensorcircuit.KerasLayer` alias to :py:meth:`tensorcircuit.keras.KerasLayer`."
+"We also provider wrapper of quantum function for keras layer as "
+":py:meth:`tensorcircuit.KerasLayer` alias to "
+":py:meth:`tensorcircuit.keras.KerasLayer`."
msgstr ""
#: ../../source/quickstart.rst:752
msgid ""
-"``KerasLayer`` can process multiple input arguments with the input as a dict, "
-"following the common keras practice, see example below."
+"``KerasLayer`` can process multiple input arguments with the input as a "
+"dict, following the common keras practice, see example below."
msgstr ""
#: ../../source/quickstart.rst:774
@@ -766,11 +759,9 @@ msgstr "**使用 scipy接口使用scipy优化器:**"
#: ../../source/quickstart.rst:776
msgid ""
-"Automatically transform quantum functions as scipy-compatible values and grad "
-"functions as provided for scipy interface with ``jac=True``."
-msgstr ""
-"为带有 jac=True 的 scipy 接口自动将量子函数转换为与 scipy 兼容的 value 和 grad 函"
-"数。"
+"Automatically transform quantum functions as scipy-compatible values and "
+"grad functions as provided for scipy interface with ``jac=True``."
+msgstr "为带有 jac=True 的 scipy 接口自动将量子函数转换为与 scipy 兼容的 value 和 grad 函数。"
#: ../../source/quickstart.rst:802
msgid "Templates as Shortcuts"
@@ -785,18 +776,20 @@ msgid "Ising type Hamiltonian defined on a general graph"
msgstr "在一般图上定义的伊辛型哈密顿量"
#: ../../source/quickstart.rst:808
-msgid "See :py:meth:`tensorcircuit.templates.measurements.spin_glass_measurements`"
-msgstr ""
-"参考 :py:meth:`tensorcircuit.templates.measurements.spin_glass_measurements`"
+msgid ""
+"See "
+":py:meth:`tensorcircuit.templates.measurements.spin_glass_measurements`"
+msgstr "参考 :py:meth:`tensorcircuit.templates.measurements.spin_glass_measurements`"
#: ../../source/quickstart.rst:810
msgid "Heisenberg Hamiltonian on a general graph with possible external fields"
msgstr "具有可能存在的外场的一般图上的海森堡哈密顿量"
#: ../../source/quickstart.rst:812
-msgid "See :py:meth:`tensorcircuit.templates.measurements.heisenberg_measurements`"
-msgstr ""
-"参考 :py:meth:`tensorcircuit.templates.measurements.heisenberg_measurements`"
+msgid ""
+"See "
+":py:meth:`tensorcircuit.templates.measurements.heisenberg_measurements`"
+msgstr "参考 :py:meth:`tensorcircuit.templates.measurements.heisenberg_measurements`"
#: ../../source/quickstart.rst:814
msgid "**Circuit Blocks:**"
@@ -812,45 +805,71 @@ msgstr "**电路块**"
#~ msgstr "从GitHub安装"
#~ msgid ""
-#~ "For beta version usage, one needs to install tensorcircuit package from "
-#~ "GitHub. For development and PR workflow, please refer to `contribution "
+#~ "For beta version usage, one needs "
+#~ "to install tensorcircuit package from "
+#~ "GitHub. For development and PR workflow,"
+#~ " please refer to `contribution "
#~ "`__ instead."
#~ msgstr ""
-#~ "如需使用测试版本,则需要从 GitHub 安装 tensorcircuit。对于开发和 PR 工作流程,"
-#~ "请另外参考 `贡献 `__ 。"
+#~ "如需使用测试版本,则需要从 GitHub 安装 tensorcircuit。对于开发和 PR"
+#~ " 工作流程,请另外参考 `贡献 `__ 。"
#~ msgid ""
-#~ "For private tensorcircuit-dev repo, one needs to first configure the SSH key "
-#~ "on GitHub and locally, please refer to `GitHub doc