diff --git a/404.html b/404.html new file mode 100644 index 000000000000..afc764f29507 --- /dev/null +++ b/404.html @@ -0,0 +1,357 @@ + + + + + + + + + + + + + Oops! | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +
+ + + + + + + + +
+ +
+
+ +
+
+
+
+ + +
+ + +

Oops!

+ +

You've reached a dead end.

+ +

+ If you feel like something should be here, you can open an issue on GitHub. +

+ +

+ Click here to go back to the main page. +

+
+
+
+
+
+ +
+
+
+
+

Docs

+

Access comprehensive developer documentation for PyTorch

+ View Docs +
+ +
+

Tutorials

+

Get in-depth tutorials for beginners and advanced developers

+ View Tutorials +
+ +
+

Resources

+

Find development resources and get your questions answered

+ View Resources +
+
+
+
+ + + +
+
+
+
+ + +
+
+
+ + +
+ + + + + + + + + + + + + + + + + + + + + + + diff --git a/CNAME b/CNAME index c101f6da020d..583993f7b85f 100644 --- a/CNAME +++ b/CNAME @@ -1 +1 @@ -pytorch.org \ No newline at end of file +docs.pytorch.org diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..b91e23b17c02 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,76 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic +address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a +professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies within all project spaces, and it also applies when +an individual is representing the project or its community in public spaces. +Examples of representing a project or community include using an official +project e-mail address, posting via an official social media account, or acting +as an appointed representative at an online or offline event. Representation of +a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at . All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000000..90e93bd32f19 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to hub +We want to make contributing to this project as easy and transparent as +possible. + +## Pull Requests +We actively welcome your pull requests. + +1. Fork the repo and create your branch from `master`. +2. If you've added code that should be tested, add tests. +3. If you've changed APIs, update the documentation. +4. Ensure the test suite passes. +5. Make sure your code lints. +6. If you haven't already, complete the Contributor License Agreement ("CLA"). + +## Contributor License Agreement ("CLA") +In order to accept your pull request, we need you to submit a CLA. You only need +to do this once to work on any of Facebook's open source projects. + +Complete your CLA here: + +## Issues +We use GitHub issues to track public bugs. Please ensure your description is +clear and has sufficient instructions to be able to reproduce the issue. + +Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe +disclosure of security bugs. In those cases, please go through the process +outlined on that page and do not file a public issue. + +## License +By contributing to hub, you agree that your contributions will be licensed +under the LICENSE file in the root directory of this source tree. 
\ No newline at end of file diff --git a/Gemfile b/Gemfile deleted file mode 100644 index 076585ad2e5c..000000000000 --- a/Gemfile +++ /dev/null @@ -1,4 +0,0 @@ -source 'https://rubygems.org' - -gem 'github-pages', :group => :jekyll_plugins -gem 'breakpoint' \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000000..673979d26033 --- /dev/null +++ b/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2018, Facebook Inc +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Makefile b/Makefile deleted file mode 100644 index e111cceae1f3..000000000000 --- a/Makefile +++ /dev/null @@ -1,7 +0,0 @@ - -serve: - bundle exec jekyll serve --watch --trace - -setup: - gem install bundler - bundle install \ No newline at end of file diff --git a/README.md b/README.md deleted file mode 100644 index fa3e2c4cc9c3..000000000000 --- a/README.md +++ /dev/null @@ -1,2 +0,0 @@ -# PyTorch Website -http://pytorch.org diff --git a/_config.yml b/_config.yml index 7d6689783440..4b319e0d4e19 100644 --- a/_config.yml +++ b/_config.yml @@ -1,28 +1 @@ -url: http://pytorch.org -name: PyTorch -description: 'Scientific Computing...' -latest_version: 1.0 -baseurl: / -relative_permalinks: false -timezone: America/Los_Angeles -sass: - sass_dir: _sass # default - style: compressed -safe: true -highlighter: rouge -markdown: kramdown -future: true -include: - - _static - - _modules - - _sources - - docs/_sources -exclude: - - node_modules - - README.md - - CNAME - - Gemfile - - Gemfile.lock - - package.json - -# google_site_verification: eOAFtDphTbbm4OPKva2d3Z0Z_2bBxWMGdkD0IRQ6VeA +include: [_static, _images, _modules, _sources, _asserts.html, _creation.html, _comparison.html, _lowrank.html, _script.html, _diagnostic.html, _dynamo.html, _serialization.html, _type_utils, _tensor_str.html, _trace.html, _utils.html, _internal, _C, _distributed_autograd.html, _distributed_c10d.html, _distributed_rpc.html, _fft.html, _linalg.html, _monitor.html, _nested.html, _nn.html, _profiler.html, _sparse.html, _special.html, __config__.html, _dynamo, _lobpcg.html, _jit_internal.html, _numeric_suite.html, _numeric_suite_fx.html, _sanitizer.html, _symbolic_trace.html, _async.html, _freeze.html, _fuser.html, _type_utils.html, _utils ] diff --git a/_data/apps.yml b/_data/apps.yml deleted file mode 100644 index 07e2e7d55f15..000000000000 --- a/_data/apps.yml +++ /dev/null @@ -1,60 +0,0 @@ -- - name: "Slack" - desc: "A messaging app for teams" - url: "https://slack.com" - 
repository: "" - image: "/static/img/apps-test-img.png" - rating: 5 - tags: ["foo", "bar", "baz", "bing"] - downloads: "58,783" - featured: false -- - name: "Some Cool App" - desc: "A messaging app for teams with a longer desciription." - url: "https://slack.com" - repository: "" - image: "/static/img/apps-test-img.png" - rating: 4 - tags: ["foo", "bar", "baz", "bing"] - downloads: "11,783" - featured: false -- - name: "Slack" - desc: "A messaging app for teams" - url: "https://slack.com" - repository: "" - image: "/static/img/apps-test-img.png" - rating: 5 - tags: ["foo", "bar", "baz", "bing"] - downloads: "5,355" - featured: false -- - name: "An even longer cool name" - desc: "But a short description" - url: "https://slack.com" - repository: "" - image: "/static/img/apps-test-img.png" - rating: 1 - tags: ["foo", "bar", "baz", "bing", "foo", "bar", "baz", "bing"] - downloads: "8,783" - featured: false -- - name: "Really Neat App" - desc: "To monitor your children when you are not home and should be." 
- url: "https://slack.com" - repository: "" - image: "/static/img/apps-test-img.png" - rating: 5 - tags: ["foo", "bar", "baz", "bing"] - downloads: "878" - featured: false -- - name: "Slack" - desc: "A messaging app for teams" - url: "https://slack.com" - repository: "" - image: "/static/img/apps-test-img.png" - rating: 3 - tags: ["foo", "bar", "baz", "bing"] - downloads: "83" - featured: false \ No newline at end of file diff --git a/_data/wizard.yml b/_data/wizard.yml deleted file mode 100644 index 32b88dbdd539..000000000000 --- a/_data/wizard.yml +++ /dev/null @@ -1,113 +0,0 @@ -############ conda section ######################### -- - matcher: 'conda,linux,cuda7.5,python2.7' - cmd: 'conda install pytorch torchvision -c soumith' -- - matcher: 'conda,linux,cuda8.0,python2.7' - cmd: 'conda install pytorch torchvision cuda80 -c soumith' -- - matcher: 'conda,linux,cudanone,python2.7' - cmd: 'conda install pytorch torchvision -c soumith' -- - matcher: 'conda,linux,cuda7.5,python3.5' - cmd: 'conda install pytorch torchvision -c soumith' -- - matcher: 'conda,linux,cuda8.0,python3.5' - cmd: 'conda install pytorch torchvision cuda80 -c soumith' -- - matcher: 'conda,linux,cudanone,python3.5' - cmd: 'conda install pytorch torchvision -c soumith' -- - matcher: 'conda,linux,cuda7.5,python3.6' - cmd: 'conda install pytorch torchvision -c soumith' -- - matcher: 'conda,linux,cuda8.0,python3.6' - cmd: 'conda install pytorch torchvision cuda80 -c soumith' -- - matcher: 'conda,linux,cudanone,python3.6' - cmd: 'conda install pytorch torchvision -c soumith' -- - matcher: 'conda,osx,cuda7.5,python2.7' - cmd: 'conda install pytorch torchvision -c soumith
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'conda,osx,cuda8.0,python2.7' - cmd: 'conda install pytorch torchvision -c soumith
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'conda,osx,cudanone,python2.7' - cmd: 'conda install pytorch torchvision -c soumith
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'conda,osx,cuda7.5,python3.5' - cmd: 'conda install pytorch torchvision -c soumith
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'conda,osx,cuda8.0,python3.5' - cmd: 'conda install pytorch torchvision -c soumith
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'conda,osx,cudanone,python3.5' - cmd: 'conda install pytorch torchvision -c soumith
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'conda,osx,cuda7.5,python3.6' - cmd: 'conda install pytorch torchvision -c soumith
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'conda,osx,cuda8.0,python3.6' - cmd: 'conda install pytorch torchvision -c soumith
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'conda,osx,cudanone,python3.6' - cmd: 'conda install pytorch torchvision -c soumith
# OSX Binaries dont support CUDA, install from source if CUDA is needed' - -############ pip section ######################### -######### OSX ###################### -- - matcher: 'pip,osx,cuda7.5,python2.7' - cmd: 'pip install http://download.pytorch.org/whl/torch-0.1.10.post1-cp27-none-macosx_10_7_x86_64.whl
pip install torchvision
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'pip,osx,cuda8.0,python2.7' - cmd: 'pip install http://download.pytorch.org/whl/torch-0.1.10.post1-cp27-none-macosx_10_7_x86_64.whl
pip install torchvision
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'pip,osx,cudanone,python2.7' - cmd: 'pip install http://download.pytorch.org/whl/torch-0.1.10.post1-cp27-none-macosx_10_7_x86_64.whl
pip install torchvision
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'pip,osx,cuda7.5,python3.5' - cmd: 'pip install http://download.pytorch.org/whl/torch-0.1.10.post1-cp35-cp35m-macosx_10_6_x86_64.whl
pip install torchvision
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'pip,osx,cuda8.0,python3.5' - cmd: 'pip install http://download.pytorch.org/whl/torch-0.1.10.post1-cp35-cp35m-macosx_10_6_x86_64.whl
pip install torchvision
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'pip,osx,cudanone,python3.5' - cmd: 'pip install http://download.pytorch.org/whl/torch-0.1.10.post1-cp35-cp35m-macosx_10_6_x86_64.whl
pip install torchvision
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'pip,osx,cuda7.5,python3.6' - cmd: 'pip install http://download.pytorch.org/whl/torch-0.1.10.post1-cp36-cp36m-macosx_10_7_x86_64.whl
pip install torchvision
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'pip,osx,cuda8.0,python3.6' - cmd: 'pip install http://download.pytorch.org/whl/torch-0.1.10.post1-cp36-cp36m-macosx_10_7_x86_64.whl
pip install torchvision
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -- - matcher: 'pip,osx,cudanone,python3.6' - cmd: 'pip install http://download.pytorch.org/whl/torch-0.1.10.post1-cp36-cp36m-macosx_10_7_x86_64.whl
pip install torchvision
# OSX Binaries dont support CUDA, install from source if CUDA is needed' -######### Linux ###################### -- - matcher: 'pip,linux,cuda7.5,python2.7' - cmd: 'pip install http://download.pytorch.org/whl/cu75/torch-0.1.10.post2-cp27-none-linux_x86_64.whl
pip install torchvision' -- - matcher: 'pip,linux,cudanone,python2.7' - cmd: 'pip install http://download.pytorch.org/whl/cu75/torch-0.1.10.post2-cp27-none-linux_x86_64.whl
pip install torchvision' -- - matcher: 'pip,linux,cuda8.0,python2.7' - cmd: 'pip install http://download.pytorch.org/whl/cu80/torch-0.1.10.post2-cp27-none-linux_x86_64.whl
pip install torchvision' -- - matcher: 'pip,linux,cuda7.5,python3.5' - cmd: 'pip install http://download.pytorch.org/whl/cu75/torch-0.1.10.post2-cp35-cp35m-linux_x86_64.whl
pip install torchvision' -- - matcher: 'pip,linux,cudanone,python3.5' - cmd: 'pip install http://download.pytorch.org/whl/cu75/torch-0.1.10.post2-cp35-cp35m-linux_x86_64.whl
pip install torchvision' -- - matcher: 'pip,linux,cuda8.0,python3.5' - cmd: 'pip install http://download.pytorch.org/whl/cu80/torch-0.1.10.post2-cp35-cp35m-linux_x86_64.whl
pip install torchvision' -- - matcher: 'pip,linux,cuda7.5,python3.6' - cmd: 'pip install http://download.pytorch.org/whl/cu75/torch-0.1.10.post2-cp36-cp36m-linux_x86_64.whl
pip install torchvision' -- - matcher: 'pip,linux,cudanone,python3.6' - cmd: 'pip install http://download.pytorch.org/whl/cu75/torch-0.1.10.post2-cp36-cp36m-linux_x86_64.whl
pip install torchvision' -- - matcher: 'pip,linux,cuda8.0,python3.6' - cmd: 'pip install http://download.pytorch.org/whl/cu80/torch-0.1.10.post2-cp36-cp36m-linux_x86_64.whl
pip install torchvision' diff --git a/_includes/footer.html b/_includes/footer.html deleted file mode 100644 index 2b96a18c857e..000000000000 --- a/_includes/footer.html +++ /dev/null @@ -1,21 +0,0 @@ -
- - {% if page.id != 'home' %} -
- {% endif %} - -
- -

- Maintained by the PyTorch core team.
- ©2017 PyTorch -

-
- - {% include primary-nav.html %} - - {% if page.id != 'home' %} -
- {% endif %} - -
diff --git a/_includes/header.html b/_includes/header.html deleted file mode 100644 index 41f96e30a6e6..000000000000 --- a/_includes/header.html +++ /dev/null @@ -1,16 +0,0 @@ -
- - {% if page.id != 'home' and page.id != 'docs' %} -
- {% endif %} - - - Docs - - {% include primary-nav.html %} - - {% if page.id != 'home' and page.id != 'docs' %} -
- {% endif %} - -
\ No newline at end of file diff --git a/_includes/primary-nav.html b/_includes/primary-nav.html deleted file mode 100644 index 233382440bea..000000000000 --- a/_includes/primary-nav.html +++ /dev/null @@ -1,6 +0,0 @@ - diff --git a/_layouts/about.html b/_layouts/about.html deleted file mode 100644 index 187ac113f5f4..000000000000 --- a/_layouts/about.html +++ /dev/null @@ -1,28 +0,0 @@ - - - - {{ page.title }} - - - - - - - - - - {% include header.html %} - -
-
- {{ content }} -
-
- - {% include footer.html %} - - - \ No newline at end of file diff --git a/_layouts/default.html b/_layouts/default.html deleted file mode 100644 index 0d46c4c251a1..000000000000 --- a/_layouts/default.html +++ /dev/null @@ -1,24 +0,0 @@ - - - - {{ page.title }} - - - - - - - - - - {% include header.html %} - - {{ content }} - - {% include footer.html %} - - - \ No newline at end of file diff --git a/_layouts/tutorial.html b/_layouts/tutorial.html deleted file mode 100644 index 187ac113f5f4..000000000000 --- a/_layouts/tutorial.html +++ /dev/null @@ -1,28 +0,0 @@ - - - - {{ page.title }} - - - - - - - - - - {% include header.html %} - -
-
- {{ content }} -
-
- - {% include footer.html %} - - - \ No newline at end of file diff --git a/_sass/_about.scss b/_sass/_about.scss deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/_sass/_apps.scss b/_sass/_apps.scss deleted file mode 100644 index 0646a8f1644a..000000000000 --- a/_sass/_apps.scss +++ /dev/null @@ -1,127 +0,0 @@ - -#apps { - p { - margin-bottom: 60px; - } -} - -.apps { - overflow: hidden; - @include border-radius(3px); - - .app-row { - background-color: $grey-light; - font-size: 14px; - line-height: 18px; - color: #808080; - border-bottom: 1px solid #d9d9d9; - width: 100%; - float: left; - - &.apps-header { - background-color: $grey; - border-bottom: none; - color: #fff; - font-weight: 200; - - .name-cell { - color: #fff; - font-weight: 200; - } - - .downloads-cell { - color: #fff; - font-weight: 200; - font-size: 14px; - } - - .app-cell { - height: auto; - } - } - - .app-cell { - padding: 16px 16px 16px 0; - width: 15%; - float: left; - height: 100%; - height: 90px; - - &:first-child { - padding-left: 16px; - } - - > div { - @extend %vertical-align; - } - } - - .img-cell { - width: 10%; - - > div { - background-repeat: no-repeat; - background-size: cover; - background-position: center; - background-color: $grey; - height: 58px; - width: 58px; - } - } - - .name-cell { - color: $grey; - font-weight: 600; - padding-left: 5px; - } - - .desc-cell { - width: 19%; - font-weight: 200; - } - - .downloads-cell { - font-size: 16px; - font-weight: 600; - color: $grey-medium; - width: 11%; - } - - .rating-cell { - .star { - float: left; - color: $grey-medium; - font-size: 20px; - - &.active { - color: $orange; - } - } - } - - .tags-cell { - font-weight: 200; - } - - .learn-cell {} - - .btn { - font-weight: 600; - font-size: 14px; - height: 38px; - line-height: 36px; - padding: 0 30px; - border: 2px solid $grey-medium; - color: $grey-medium; - @extend %vertical-align; - @include border-radius(19px); - - &:hover { - background-color: $orange; - border-color: 
$orange; - color: #fff; - } - } - } -} - diff --git a/_sass/_base.scss b/_sass/_base.scss deleted file mode 100644 index be9f6d9b26b3..000000000000 --- a/_sass/_base.scss +++ /dev/null @@ -1,143 +0,0 @@ -@import "normalize"; -@import "mixins"; - - -$grey: #333333; -$grey-medium: #999999; -$grey-light: #f7f7f7; -$red-orange: #f15532; -$text-color: #000000; -$orange: #f4921f; -$header-height: 75px; - - -html, body, ul, li { - margin: 0; - padding: 0; -} - -body { - background-color: $grey; - font-family: "Open Sans", "Helvetica", sans-serif; - font-size: 12px; - color: $text-color; -} - -h1, h2, h3, h4 { - -webkit-margin-before: 0em; - -webkit-margin-after: 0em; - font-weight: 200; -} - -a:hover { - text-decoration: none; -} - -.wrap {} - -.btn { - cursor: pointer; - @include transition(background-color 200ms, color 200ms, linear); -} - -.content { - background-color: #fff; - padding: 98px 0 123px; - - h1 { - font-size: 45px; - margin-bottom: 30px; - } - - h3 { - margin-top: 25px; - margin-bottom: 15px; - } - - p { - font-size: 18px; - color: $text-color; - font-weight: 200; - margin-bottom: 30px; - } - - img { - max-width: 100%; - } - - code { - font-family: monospace !important; - font-weight: 400; - font-size: 0.9em; - background-color: #eee; - padding: 3px 7px; - border-radius: 3px; - color: #666; - word-break: break-all; - -webkit-hyphens: auto; - -moz-hyphens: auto; - -ms-hyphens: auto; - hyphens: auto; - text-shadow: 0px 1px 0px #fff; - } - - ul { - margin-left: 25px; - margin-bottom: 35px; - font-weight: 200; - font-size: 16px; - - li { - color: $text-color; - margin-bottom: 15px; - } - } -} - -table { - margin-bottom: 30px; - overflow: auto; - font-size: 1.1em; - - th, td { - padding: 10px; - text-align: left; - vertical-align: top; - line-height: 1.6; - } - - th { - background-color: $grey; - color: #fff; - border-bottom: none; - font-weight: 600; - - &:first-child { - border-top-left-radius: 6px; - } - - &:last-child { - border-top-right-radius: 6px; - } - 
} - - td { - color: $text-color; - } - - tr, tr:last-child { - border-bottom: 1px solid #e6e6e6; - } - - tr:nth-child(odd)>td { - background-color: #fff; - } - - tr:nth-child(even)>td { - background-color: #fcfcfc; - } - - tr > td:first-child { - font-weight: 400; - } - } diff --git a/_sass/_chrome.scss b/_sass/_chrome.scss deleted file mode 100644 index 54f93dfd6a44..000000000000 --- a/_sass/_chrome.scss +++ /dev/null @@ -1,124 +0,0 @@ - -.logo { - background-image: url(../img/pytorch-logo-light.svg); - background-repeat: no-repeat; -} - -ul.primary-nav { - text-align: center; - float: right; - - li { - list-style-type: none; - float: left; - - a { - padding: 0 30px; - text-decoration: none; - color: $grey-medium; - height: 100%; - width: 100%; - float: left; - @include transition(color 200ms, linear); - - &.active, - &:hover { - color: #fff; - text-decoration: none; - } - } - } -} - -header { - background-color: #262626; - height: $header-height; - width: 100%; - - .logo { - width: 151px; - height: 31px; - float: left; - position: relative; - top: 18px; - - a { - display: block; - height: 100%; - } - } - - a { - font-size: 14px; - font-weight: 400; - } - - ul.primary-nav { - height: 100%; - line-height: 76px; - margin-right: 22px; - } - - .btn { - display: block; - height: 40px; - line-height: 38px; - padding: 0 21px; - border: 2px solid $grey-medium; - color: $grey-medium; - float: right; - position: relative; - top: 17px; - @include transition(border-color 200ms, color 200ms, linear); - @include border-radius(25px); - - &.active, - &:hover { - border-color: #fff; - color: #fff; - } - } -} - -footer { - background-color: $grey; - height: 150px; - padding: 33px 0 31px; - - .left { - color: #808080; - font-size: 14px; - width: 245px; - line-height: 27px; - font-weight: 200; - float: left; - } - - .logo { - width: 135px; - height: 28px; - margin-bottom: 10px; - - a { - display: block; - height: 100%; - } - } - - a { - font-size: 14px; - font-weight: 400; - } - - 
ul.primary-nav { - height: 50px; - line-height: 50px; - position: relative; - top: 18px; - - li:last-child a { - padding-right: 0; - } - } -} - diff --git a/_sass/_docs.scss b/_sass/_docs.scss deleted file mode 100644 index 3721d4bac062..000000000000 --- a/_sass/_docs.scss +++ /dev/null @@ -1,20 +0,0 @@ - -#docs { - - header { - padding-right: 2%; - padding-left: 2%; - } - - footer { - display: none; - } - - iframe { - position: fixed; - left: 0; - bottom: 0; - width: 100%; - height: calc(100% - #{$header-height}); - } -} diff --git a/_sass/_home.scss b/_sass/_home.scss deleted file mode 100644 index 8f461cc4de65..000000000000 --- a/_sass/_home.scss +++ /dev/null @@ -1,324 +0,0 @@ - -#home { - header, footer { - padding-right: 8%; - padding-left: 8%; - } - - .container { - max-width: 970px !important; - } -} - -.hero { - background-image: url(../img/hero.png); - background-repeat: no-repeat; - background-size: cover; - background-position: center; - background-color: $grey; - height: 500px; - position: relative; - - .container { - height: 100%; - text-align: center; - } - - .inner { - @extend %vertical-align; - } - - h1 { - color: #fff; - font-size: 40px; - letter-spacing: -0.01em; - line-height: 1.22em; - margin-bottom: 30px; - } - - h2 { - color: #fff; - font-size: 18px; - line-height: 1.55em; - letter-spacing: 0em; - margin-bottom: 25px; - } - - @media screen and (max-width: 400px) { - h1 { - font-size: 30px; - } - - h2 { - font-size: 15px; - } - - .btn { - font-size: 13px; - } - } - - .btn { - font-weight: 600; - font-size: 14px; - height: 48px; - line-height: 46px; - padding: 0 45px; - border: 2px solid #fff; - margin-top: 10px; - color: #fff; - @include border-radius(24px); - - &:hover { - background-color: #fff; - color: $red-orange; - } - } -} - -.install-wizard { - background-color: #fff; - padding: 108px 0; - - h3 { - font-size: 50px; - margin-bottom: 13px; - } - - h4 { - font-size: 16px; - line-height: 22px; - color: $grey-medium; - } - - .row { - 
margin-bottom: 30px; - } - - .title { - padding-top: 10px; - } - - .options-sets { - text-align: right; - color: $grey-medium; - font-size: 14px; - - .option-row { - float: left; - width: 100%; - margin-bottom: 15px; - - .option-label { - float: left; - width: 25%; - text-align: right; - padding-right: 15px; - padding-top: 10px; - display: table; - vertical-align: middle; - } - - .option-set { - float: left; - width: 75%; - } - - .btn { - font-weight: 600; - font-size: 13px; - height: 38px; - line-height: 36px; - padding: 0; - border: 2px solid #ccc; - color: $grey-medium; - @include border-radius(19px); - - &.selected, - &:hover { - background-color: $grey; - color: #fff; - border-color: $grey; - } - - &:last-child { - margin-right: 0 !important; - } - } - - &.cuda { - .btn { - width: (91% / 3); - margin-right: 2%; - } - } - - &.os { - .btn { - width: (94% / 2); - margin-right: 2%; - } - } - - &.pm { - .btn { - width: (91% / 3); - margin-right: 2%; - } - } - - &.python { - .btn { - width: (91% / 3); - margin-right: 2%; - } - } - - } - } - - .command { - background-color: $grey; - overflow: hidden; - @include border-radius(7px); - - .label { - font-weight: 600; - font-size: 14px; - float: left; - width: 180px; - padding: 18px 10px 12px; - text-align: center; - } - - .text { - background-color: $grey-light; - font-weight: 200; - font-size: 15px; - float: left; - width: calc(100% - 180px); - padding: 15px 25px;; - } - } -} - -.projects-tutorials { - background-color: $grey-light; - padding: 90px 0 20px; - - h3 { - font-size: 40px; - margin-bottom: 20px; - } - - h4 { - font-size: 17px; - margin-bottom: 45px; - color: $grey-medium; - } - - .box { - text-align: center; - padding: 10px 0; - margin-bottom: 70px; - - img { - width: 60px; - height: 60px; - margin-bottom: 35px; - @include opacity(0.5); - } - - &.projects { - img { - width: 50px; - height: 50px; - } - } - - &:first-child { - border-right: 1px solid #d9d9d9; - } - } - - .btn { - font-weight: 600; - font-size: 
14px; - height: 50px; - line-height: 48px; - padding: 0 60px; - border: 2px solid $red-orange; - color: $grey; - @include border-radius(25px); - - &:hover { - background-color: $red-orange; - color: #fff; - } - } -} - -.logos { - background-color: #fff; - text-align: center; - padding: 121px 0 100px; - - h3 { - font-size: 40px; - margin-bottom: 85px; - } - - .logos-wrapper { - width: 960px; - height: 312px; - margin: 0 auto; - - img { - float: left; - } - } - - .row { - height: 150px; - - div { - height: 100%; - } - } - - @media screen and (max-width: 768px) { - .row { - height: 100px; - - img { - max-width: 70%; - } - - img.smaller { - max-width: 40%; - } - - img.smallerer { - max-width: 30%; - } - } - } - - img { - @extend %vertical-align; - max-width: 90%; - } - - .smaller { - max-width: 55%; - } - - .smallerer { - max-width: 40%; - } - - .larger { - max-width: 100%; - } -} - diff --git a/_sass/_mixins.scss b/_sass/_mixins.scss deleted file mode 100644 index 0adca7acda51..000000000000 --- a/_sass/_mixins.scss +++ /dev/null @@ -1,57 +0,0 @@ - -%vertical-align { - position: relative; - top: 50%; - -webkit-transform: translateY(-50%); - -ms-transform: translateY(-50%); - transform: translateY(-50%); -} - -%no-select { - -webkit-touch-callout: none; - -webkit-user-select: none; - -khtml-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; -} - -%truncate-text { - white-space: nowrap; - overflow: hidden; - text-overflow: ellipsis; -} - -@mixin border-radius($radius) { - -webkit-border-radius: $radius; - -moz-border-radius: $radius; - -ms-border-radius: $radius; - border-radius: $radius; -} - -@mixin transition($args...) 
{ - -webkit-transition: $args; - -moz-transition: $args; - -ms-transition: $args; - -o-transition: $args; - transition: $args; -} - -@mixin animation($str) { - -webkit-animation: #{$str}; - -moz-animation: #{$str}; - -ms-animation: #{$str}; - -o-animation: #{$str}; - animation: #{$str}; -} - -@mixin box-shadow($horiz:1px, $vert:1px, $blur:3px, $spread:1px, $color:rgba(0,0,0, 0.2)) { - -webkit-box-shadow: $horiz $vert $blur $spread $color; - -moz-box-shadow: $horiz $vert $blur $spread $color; - box-shadow: $horiz $vert $blur $spread $color; -} - -@mixin opacity($opacity) { - opacity: $opacity; - filter: alpha(opacity=($opacity * 100)); //IE8 -} \ No newline at end of file diff --git a/_sass/_normalize.scss b/_sass/_normalize.scss deleted file mode 100644 index 01ee6971c4d1..000000000000 --- a/_sass/_normalize.scss +++ /dev/null @@ -1,461 +0,0 @@ -/*! normalize.css v5.0.0 | MIT License | github.com/necolas/normalize.css */ - -/** - * 1. Change the default font family in all browsers (opinionated). - * 2. Correct the line height in all browsers. - * 3. Prevent adjustments of font size after orientation changes in - * IE on Windows Phone and in iOS. - */ - -/* Document - ========================================================================== */ - -html { - font-family: sans-serif; /* 1 */ - line-height: 1.15; /* 2 */ - -ms-text-size-adjust: 100%; /* 3 */ - -webkit-text-size-adjust: 100%; /* 3 */ -} - -/* Sections - ========================================================================== */ - -/** - * Remove the margin in all browsers (opinionated). - */ - -body { - margin: 0; -} - -/** - * Add the correct display in IE 9-. - */ - -article, -aside, -footer, -header, -nav, -section { - display: block; -} - -/** - * Correct the font size and margin on `h1` elements within `section` and - * `article` contexts in Chrome, Firefox, and Safari. 
- */ - -h1 { - font-size: 2em; - margin: 0.67em 0; -} - -/* Grouping content - ========================================================================== */ - -/** - * Add the correct display in IE 9-. - * 1. Add the correct display in IE. - */ - -figcaption, -figure, -main { /* 1 */ - display: block; -} - -/** - * Add the correct margin in IE 8. - */ - -figure { - margin: 1em 40px; -} - -/** - * 1. Add the correct box sizing in Firefox. - * 2. Show the overflow in Edge and IE. - */ - -hr { - box-sizing: content-box; /* 1 */ - height: 0; /* 1 */ - overflow: visible; /* 2 */ -} - -/** - * 1. Correct the inheritance and scaling of font size in all browsers. - * 2. Correct the odd `em` font sizing in all browsers. - */ - -pre { - font-family: monospace, monospace; /* 1 */ - font-size: 1em; /* 2 */ -} - -/* Text-level semantics - ========================================================================== */ - -/** - * 1. Remove the gray background on active links in IE 10. - * 2. Remove gaps in links underline in iOS 8+ and Safari 8+. - */ - -a { - background-color: transparent; /* 1 */ - -webkit-text-decoration-skip: objects; /* 2 */ -} - -/** - * Remove the outline on focused links when they are also active or hovered - * in all browsers (opinionated). - */ - -a:active, -a:hover { - outline-width: 0; -} - -/** - * 1. Remove the bottom border in Firefox 39-. - * 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari. - */ - -abbr[title] { - border-bottom: none; /* 1 */ - text-decoration: underline; /* 2 */ - text-decoration: underline dotted; /* 2 */ -} - -/** - * Prevent the duplicate application of `bolder` by the next rule in Safari 6. - */ - -b, -strong { - font-weight: inherit; -} - -/** - * Add the correct font weight in Chrome, Edge, and Safari. - */ - -b, -strong { - font-weight: bolder; -} - -/** - * 1. Correct the inheritance and scaling of font size in all browsers. - * 2. Correct the odd `em` font sizing in all browsers. 
- */ - -code, -kbd, -samp { - font-family: monospace, monospace; /* 1 */ - font-size: 1em; /* 2 */ -} - -/** - * Add the correct font style in Android 4.3-. - */ - -dfn { - font-style: italic; -} - -/** - * Add the correct background and color in IE 9-. - */ - -mark { - background-color: #ff0; - color: #000; -} - -/** - * Add the correct font size in all browsers. - */ - -small { - font-size: 80%; -} - -/** - * Prevent `sub` and `sup` elements from affecting the line height in - * all browsers. - */ - -sub, -sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline; -} - -sub { - bottom: -0.25em; -} - -sup { - top: -0.5em; -} - -/* Embedded content - ========================================================================== */ - -/** - * Add the correct display in IE 9-. - */ - -audio, -video { - display: inline-block; -} - -/** - * Add the correct display in iOS 4-7. - */ - -audio:not([controls]) { - display: none; - height: 0; -} - -/** - * Remove the border on images inside links in IE 10-. - */ - -img { - border-style: none; -} - -/** - * Hide the overflow in IE. - */ - -svg:not(:root) { - overflow: hidden; -} - -/* Forms - ========================================================================== */ - -/** - * 1. Change the font styles in all browsers (opinionated). - * 2. Remove the margin in Firefox and Safari. - */ - -button, -input, -optgroup, -select, -textarea { - font-family: sans-serif; /* 1 */ - font-size: 100%; /* 1 */ - line-height: 1.15; /* 1 */ - margin: 0; /* 2 */ -} - -/** - * Show the overflow in IE. - * 1. Show the overflow in Edge. - */ - -button, -input { /* 1 */ - overflow: visible; -} - -/** - * Remove the inheritance of text transform in Edge, Firefox, and IE. - * 1. Remove the inheritance of text transform in Firefox. - */ - -button, -select { /* 1 */ - text-transform: none; -} - -/** - * 1. Prevent a WebKit bug where (2) destroys native `audio` and `video` - * controls in Android 4. - * 2. 
Correct the inability to style clickable types in iOS and Safari. - */ - -button, -html [type="button"], /* 1 */ -[type="reset"], -[type="submit"] { - -webkit-appearance: button; /* 2 */ -} - -/** - * Remove the inner border and padding in Firefox. - */ - -button::-moz-focus-inner, -[type="button"]::-moz-focus-inner, -[type="reset"]::-moz-focus-inner, -[type="submit"]::-moz-focus-inner { - border-style: none; - padding: 0; -} - -/** - * Restore the focus styles unset by the previous rule. - */ - -button:-moz-focusring, -[type="button"]:-moz-focusring, -[type="reset"]:-moz-focusring, -[type="submit"]:-moz-focusring { - outline: 1px dotted ButtonText; -} - -/** - * Change the border, margin, and padding in all browsers (opinionated). - */ - -fieldset { - border: 1px solid #c0c0c0; - margin: 0 2px; - padding: 0.35em 0.625em 0.75em; -} - -/** - * 1. Correct the text wrapping in Edge and IE. - * 2. Correct the color inheritance from `fieldset` elements in IE. - * 3. Remove the padding so developers are not caught out when they zero out - * `fieldset` elements in all browsers. - */ - -legend { - box-sizing: border-box; /* 1 */ - color: inherit; /* 2 */ - display: table; /* 1 */ - max-width: 100%; /* 1 */ - padding: 0; /* 3 */ - white-space: normal; /* 1 */ -} - -/** - * 1. Add the correct display in IE 9-. - * 2. Add the correct vertical alignment in Chrome, Firefox, and Opera. - */ - -progress { - display: inline-block; /* 1 */ - vertical-align: baseline; /* 2 */ -} - -/** - * Remove the default vertical scrollbar in IE. - */ - -textarea { - overflow: auto; -} - -/** - * 1. Add the correct box sizing in IE 10-. - * 2. Remove the padding in IE 10-. - */ - -[type="checkbox"], -[type="radio"] { - box-sizing: border-box; /* 1 */ - padding: 0; /* 2 */ -} - -/** - * Correct the cursor style of increment and decrement buttons in Chrome. - */ - -[type="number"]::-webkit-inner-spin-button, -[type="number"]::-webkit-outer-spin-button { - height: auto; -} - -/** - * 1. 
Correct the odd appearance in Chrome and Safari. - * 2. Correct the outline style in Safari. - */ - -[type="search"] { - -webkit-appearance: textfield; /* 1 */ - outline-offset: -2px; /* 2 */ -} - -/** - * Remove the inner padding and cancel buttons in Chrome and Safari on macOS. - */ - -[type="search"]::-webkit-search-cancel-button, -[type="search"]::-webkit-search-decoration { - -webkit-appearance: none; -} - -/** - * 1. Correct the inability to style clickable types in iOS and Safari. - * 2. Change font properties to `inherit` in Safari. - */ - -::-webkit-file-upload-button { - -webkit-appearance: button; /* 1 */ - font: inherit; /* 2 */ -} - -/* Interactive - ========================================================================== */ - -/* - * Add the correct display in IE 9-. - * 1. Add the correct display in Edge, IE, and Firefox. - */ - -details, /* 1 */ -menu { - display: block; -} - -/* - * Add the correct display in all browsers. - */ - -summary { - display: list-item; -} - -/* Scripting - ========================================================================== */ - -/** - * Add the correct display in IE 9-. - */ - -canvas { - display: inline-block; -} - -/** - * Add the correct display in IE. - */ - -template { - display: none; -} - -/* Hidden - ========================================================================== */ - -/** - * Add the correct display in IE 10-. 
- */ - -[hidden] { - display: none; -} \ No newline at end of file diff --git a/_sass/_responsive.scss b/_sass/_responsive.scss deleted file mode 100644 index 4639207eb57b..000000000000 --- a/_sass/_responsive.scss +++ /dev/null @@ -1,35 +0,0 @@ -$tablet-width: 0px; -$phone-width: 0px; - - -@media (max-width: $tablet-width) { - -} - -@media (max-width: $phone-width) { - -} - -@media (max-width: 905px) { - - .primary-nav { - display: none; - } - - .col-md-5.title { - margin-bottom: 4em; - } -} - -@media (max-width: 991px) { - - #home { - .box.projects { - border-right: none; - border-bottom: 1px solid #d9d9d9; - padding-bottom: 50px; - margin-bottom: 50px; - } - } -} - diff --git a/_sass/_support.scss b/_sass/_support.scss deleted file mode 100644 index 48e3d1a2d0a1..000000000000 --- a/_sass/_support.scss +++ /dev/null @@ -1,48 +0,0 @@ - -#support { - .content { - - } - - .support-group { - margin-bottom: 45px; - - h3 { - font-size: 24px; - font-weight: 700; - margin-bottom: 20px; - } - - p { - color: #808080; - font-size: 18px; - margin-bottom: 15px; - - a { - font-weight: 700; - color: $orange; - - &:hover { - color: $red-orange; - } - } - } - - .btn { - font-size: 14px; - height: 46px; - line-height: 44px; - padding: 0 35px; - border: 2px solid #000; - color: #000; - font-weight: 700; - @include border-radius(23px); - - &:hover { - background-color: #000; - color: #fff; - } - } - } -} - diff --git a/about.md b/about.md deleted file mode 100644 index c9fb9e31dd7c..000000000000 --- a/about.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: PyTorch | About -id: about -permalink: /about/ -layout: about ---- - -PyTorch is a python package that provides two high-level features: - -- Tensor computation (like numpy) with strong GPU acceleration -- Deep Neural Networks built on a tape-based autograd system - -You can reuse your favorite python packages such as numpy, scipy and Cython to extend PyTorch when needed. 
- -At a granular level, PyTorch is a library that consists of the following components: - -| Package | Description | -| ------------------------ | --- | -| torch | a Tensor library like NumPy, with strong GPU support | -| torch.autograd | a tape based automatic differentiation library that supports all differentiable Tensor operations in torch | -| torch.nn | a neural networks library deeply integrated with autograd designed for maximum flexibility | -| torch.optim | an optimization package to be used with torch.nn with standard optimization methods such as SGD, RMSProp, LBFGS, Adam etc. | -| torch.multiprocessing | python multiprocessing, but with magical memory sharing of torch Tensors across processes. Useful for data loading and hogwild training. | -| torch.utils | DataLoader, Trainer and other utility functions for convenience | -| torch.legacy(.nn/.optim) | legacy code that has been ported over from torch for backward compatibility reasons | - -Usually one uses PyTorch either as: - -- A replacement for numpy to use the power of GPUs. -- a deep learning research platform that provides maximum flexibility and speed - -Elaborating further: - -### A GPU-ready Tensor library - -If you use numpy, then you have used Tensors (a.k.a ndarray). - -![tensor_illustration](/static/img/tensor_illustration.png) - -PyTorch provides Tensors that can live either on the CPU or the GPU, and accelerate -compute by a huge amount. - -We provide a wide variety of tensor routines to accelerate and fit your scientific computation needs -such as slicing, indexing, math operations, linear algebra, reductions. -And they are fast! - -### Dynamic Neural Networks: Tape based Autograd - -PyTorch has a unique way of building neural networks: using and replaying a tape recorder. - -Most frameworks such as `TensorFlow`, `Theano`, `Caffe` and `CNTK` have a static view of the world. -One has to build a neural network, and reuse the same structure again and again. 
-Changing the way the network behaves means that one has to start from scratch. - -With PyTorch, we use a technique called Reverse-mode auto-differentiation, which allows you to -change the way your network behaves arbitrarily with zero lag or overhead. Our inspiration comes -from several research papers on this topic, as well as current and past work such as -[autograd](https://github.com/twitter/torch-autograd), -[autograd](https://github.com/HIPS/autograd), -[Chainer](http://chainer.org), etc. - -While this technique is not unique to PyTorch, it's one of the fastest implementations of it to date. -You get the best of speed and flexibility for your crazy research. - -![dynamic_graph](/static/img/dynamic_graph.gif) - -### Python first - -PyTorch is not a Python binding into a monolothic C++ framework. -It is built to be deeply integrated into Python. -You can use it naturally like you would use numpy / scipy / scikit-learn etc. -You can write your new neural network layers in Python itself, using your favorite libraries -and use packages such as Cython and Numba. -Our goal is to not reinvent the wheel where appropriate. - -### Imperative experiences - -PyTorch is designed to be intuitive, linear in thought and easy to use. -When you execute a line of code, it gets executed. There isn't an asynchronous view of the world. -When you drop into a debugger, or receive error messages and stack traces, understanding them is straight-forward. -The stack-trace points to exactly where your code was defined. -We hope you never spend hours debugging your code because of bad stack traces or asynchronous and opaque execution engines. - -### Fast and Lean - -PyTorch has minimal framework overhead. We integrate acceleration libraries -such as Intel MKL and NVIDIA (CuDNN, NCCL) to maximize speed. -At the core, it's CPU and GPU Tensor and Neural Network backends -(TH, THC, THNN, THCUNN) are written as independent libraries with a C99 API. 
-They are mature and have been tested for years. - -Hence, PyTorch is quite fast -- whether you run small or large neural networks. - -The memory usage in PyTorch is extremely efficient compared to Torch or some of the alternatives. -We've written custom memory allocators for the GPU to make sure that -your deep learning models are maximally memory efficient. -This enables you to train bigger deep learning models than before. - -### Extensions without pain - -Writing new neural network modules, or interfacing with PyTorch's Tensor API was designed to be straight-forward -and with minimal abstractions. - -You can write new neural network layers in Python using the torch API -[or your favorite numpy based libraries such as SciPy](https://github.com/pytorch/tutorials/blob/master/Creating%20extensions%20using%20numpy%20and%20scipy.ipynb) - -If you want to write your layers in C/C++, we provide an extension API based on -[cffi](http://cffi.readthedocs.io/en/latest/) that is efficient and with minimal boilerplate. -There is no wrapper code that needs to be written. [You can see an example here](https://github.com/pytorch/extension-ffi). 
- diff --git a/api/0.1.3/en/fonts/slate.eot b/api/0.1.3/en/fonts/slate.eot deleted file mode 100755 index 13c4839a1975..000000000000 Binary files a/api/0.1.3/en/fonts/slate.eot and /dev/null differ diff --git a/api/0.1.3/en/fonts/slate.svg b/api/0.1.3/en/fonts/slate.svg deleted file mode 100644 index 5f34982306bc..000000000000 --- a/api/0.1.3/en/fonts/slate.svg +++ /dev/null @@ -1,14 +0,0 @@ - - - -Generated by IcoMoon - - - - - - - - - - diff --git a/api/0.1.3/en/fonts/slate.ttf b/api/0.1.3/en/fonts/slate.ttf deleted file mode 100755 index ace9a46a7e1e..000000000000 Binary files a/api/0.1.3/en/fonts/slate.ttf and /dev/null differ diff --git a/api/0.1.3/en/fonts/slate.woff b/api/0.1.3/en/fonts/slate.woff deleted file mode 100755 index 1e72e0ee0018..000000000000 Binary files a/api/0.1.3/en/fonts/slate.woff and /dev/null differ diff --git a/api/0.1.3/en/fonts/slate.woff2 b/api/0.1.3/en/fonts/slate.woff2 deleted file mode 100755 index 7c585a727375..000000000000 Binary files a/api/0.1.3/en/fonts/slate.woff2 and /dev/null differ diff --git a/api/0.1.3/en/image/abs.png b/api/0.1.3/en/image/abs.png deleted file mode 100644 index fa7f47024fd9..000000000000 Binary files a/api/0.1.3/en/image/abs.png and /dev/null differ diff --git a/api/0.1.3/en/image/elu.png b/api/0.1.3/en/image/elu.png deleted file mode 100644 index a12873ff63e7..000000000000 Binary files a/api/0.1.3/en/image/elu.png and /dev/null differ diff --git a/api/0.1.3/en/image/exp.png b/api/0.1.3/en/image/exp.png deleted file mode 100644 index 07d28d4b0ddf..000000000000 Binary files a/api/0.1.3/en/image/exp.png and /dev/null differ diff --git a/api/0.1.3/en/image/hshrink.png b/api/0.1.3/en/image/hshrink.png deleted file mode 100644 index 7f96292c8cc4..000000000000 Binary files a/api/0.1.3/en/image/hshrink.png and /dev/null differ diff --git a/api/0.1.3/en/image/htanh.png b/api/0.1.3/en/image/htanh.png deleted file mode 100644 index c8e6084752d6..000000000000 Binary files a/api/0.1.3/en/image/htanh.png and 
/dev/null differ diff --git a/api/0.1.3/en/image/logsigmoid.png b/api/0.1.3/en/image/logsigmoid.png deleted file mode 100644 index f632ed8dd09f..000000000000 Binary files a/api/0.1.3/en/image/logsigmoid.png and /dev/null differ diff --git a/api/0.1.3/en/image/logsoftmax.png b/api/0.1.3/en/image/logsoftmax.png deleted file mode 100644 index dec5be5ae82d..000000000000 Binary files a/api/0.1.3/en/image/logsoftmax.png and /dev/null differ diff --git a/api/0.1.3/en/image/power.png b/api/0.1.3/en/image/power.png deleted file mode 100644 index 958eeb42e2eb..000000000000 Binary files a/api/0.1.3/en/image/power.png and /dev/null differ diff --git a/api/0.1.3/en/image/prelu.png b/api/0.1.3/en/image/prelu.png deleted file mode 100644 index ac751cdfa86b..000000000000 Binary files a/api/0.1.3/en/image/prelu.png and /dev/null differ diff --git a/api/0.1.3/en/image/relu.png b/api/0.1.3/en/image/relu.png deleted file mode 100644 index d60d2abe99a7..000000000000 Binary files a/api/0.1.3/en/image/relu.png and /dev/null differ diff --git a/api/0.1.3/en/image/relu6.png b/api/0.1.3/en/image/relu6.png deleted file mode 100644 index 0a88563288bf..000000000000 Binary files a/api/0.1.3/en/image/relu6.png and /dev/null differ diff --git a/api/0.1.3/en/image/rrelu.png b/api/0.1.3/en/image/rrelu.png deleted file mode 100644 index 50e34831d4b1..000000000000 Binary files a/api/0.1.3/en/image/rrelu.png and /dev/null differ diff --git a/api/0.1.3/en/image/sigmmoid.png b/api/0.1.3/en/image/sigmmoid.png deleted file mode 100644 index 48aad7ef479b..000000000000 Binary files a/api/0.1.3/en/image/sigmmoid.png and /dev/null differ diff --git a/api/0.1.3/en/image/sigmoid.png b/api/0.1.3/en/image/sigmoid.png deleted file mode 100644 index 48aad7ef479b..000000000000 Binary files a/api/0.1.3/en/image/sigmoid.png and /dev/null differ diff --git a/api/0.1.3/en/image/softmax.png b/api/0.1.3/en/image/softmax.png deleted file mode 100644 index 29c55348994d..000000000000 Binary files 
a/api/0.1.3/en/image/softmax.png and /dev/null differ diff --git a/api/0.1.3/en/image/softmin.png b/api/0.1.3/en/image/softmin.png deleted file mode 100644 index d1807a4c7194..000000000000 Binary files a/api/0.1.3/en/image/softmin.png and /dev/null differ diff --git a/api/0.1.3/en/image/softplus.png b/api/0.1.3/en/image/softplus.png deleted file mode 100644 index 9132093b30a0..000000000000 Binary files a/api/0.1.3/en/image/softplus.png and /dev/null differ diff --git a/api/0.1.3/en/image/softsign.png b/api/0.1.3/en/image/softsign.png deleted file mode 100644 index 08054338e3aa..000000000000 Binary files a/api/0.1.3/en/image/softsign.png and /dev/null differ diff --git a/api/0.1.3/en/image/sqrt.png b/api/0.1.3/en/image/sqrt.png deleted file mode 100644 index 29b1d4215eb6..000000000000 Binary files a/api/0.1.3/en/image/sqrt.png and /dev/null differ diff --git a/api/0.1.3/en/image/square.png b/api/0.1.3/en/image/square.png deleted file mode 100644 index c191eafc048a..000000000000 Binary files a/api/0.1.3/en/image/square.png and /dev/null differ diff --git a/api/0.1.3/en/image/sshrink.png b/api/0.1.3/en/image/sshrink.png deleted file mode 100644 index 99c5d11712fe..000000000000 Binary files a/api/0.1.3/en/image/sshrink.png and /dev/null differ diff --git a/api/0.1.3/en/image/tanh.png b/api/0.1.3/en/image/tanh.png deleted file mode 100644 index d2f77aac1194..000000000000 Binary files a/api/0.1.3/en/image/tanh.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/abs.png b/api/0.1.3/en/image/tmpimage/abs.png deleted file mode 100644 index fa7f47024fd9..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/abs.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/elu.png b/api/0.1.3/en/image/tmpimage/elu.png deleted file mode 100644 index a12873ff63e7..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/elu.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/exp.png b/api/0.1.3/en/image/tmpimage/exp.png deleted file mode 100644 
index 07d28d4b0ddf..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/exp.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/hshrink.png b/api/0.1.3/en/image/tmpimage/hshrink.png deleted file mode 100644 index 7f96292c8cc4..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/hshrink.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/htanh.png b/api/0.1.3/en/image/tmpimage/htanh.png deleted file mode 100644 index c8e6084752d6..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/htanh.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/logsigmoid.png b/api/0.1.3/en/image/tmpimage/logsigmoid.png deleted file mode 100644 index f632ed8dd09f..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/logsigmoid.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/logsoftmax.png b/api/0.1.3/en/image/tmpimage/logsoftmax.png deleted file mode 100644 index dec5be5ae82d..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/logsoftmax.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/power.png b/api/0.1.3/en/image/tmpimage/power.png deleted file mode 100644 index 958eeb42e2eb..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/power.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/prelu.png b/api/0.1.3/en/image/tmpimage/prelu.png deleted file mode 100644 index ac751cdfa86b..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/prelu.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/relu.png b/api/0.1.3/en/image/tmpimage/relu.png deleted file mode 100644 index d60d2abe99a7..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/relu.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/relu6.png b/api/0.1.3/en/image/tmpimage/relu6.png deleted file mode 100644 index 0a88563288bf..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/relu6.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/rrelu.png 
b/api/0.1.3/en/image/tmpimage/rrelu.png deleted file mode 100644 index 50e34831d4b1..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/rrelu.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/sigmmoid.png b/api/0.1.3/en/image/tmpimage/sigmmoid.png deleted file mode 100644 index 48aad7ef479b..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/sigmmoid.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/sigmoid.png b/api/0.1.3/en/image/tmpimage/sigmoid.png deleted file mode 100644 index 48aad7ef479b..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/sigmoid.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/softmax.png b/api/0.1.3/en/image/tmpimage/softmax.png deleted file mode 100644 index 29c55348994d..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/softmax.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/softmin.png b/api/0.1.3/en/image/tmpimage/softmin.png deleted file mode 100644 index d1807a4c7194..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/softmin.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/softplus.png b/api/0.1.3/en/image/tmpimage/softplus.png deleted file mode 100644 index 9132093b30a0..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/softplus.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/softsign.png b/api/0.1.3/en/image/tmpimage/softsign.png deleted file mode 100644 index 08054338e3aa..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/softsign.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/sqrt.png b/api/0.1.3/en/image/tmpimage/sqrt.png deleted file mode 100644 index 29b1d4215eb6..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/sqrt.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/square.png b/api/0.1.3/en/image/tmpimage/square.png deleted file mode 100644 index c191eafc048a..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/square.png and /dev/null 
differ diff --git a/api/0.1.3/en/image/tmpimage/sshrink.png b/api/0.1.3/en/image/tmpimage/sshrink.png deleted file mode 100644 index 99c5d11712fe..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/sshrink.png and /dev/null differ diff --git a/api/0.1.3/en/image/tmpimage/tanh.png b/api/0.1.3/en/image/tmpimage/tanh.png deleted file mode 100644 index d2f77aac1194..000000000000 Binary files a/api/0.1.3/en/image/tmpimage/tanh.png and /dev/null differ diff --git a/api/0.1.3/en/images/logo.png b/api/0.1.3/en/images/logo.png deleted file mode 100644 index d8c647ed0598..000000000000 Binary files a/api/0.1.3/en/images/logo.png and /dev/null differ diff --git a/api/0.1.3/en/images/navbar.png b/api/0.1.3/en/images/navbar.png deleted file mode 100644 index df38e90d87e1..000000000000 Binary files a/api/0.1.3/en/images/navbar.png and /dev/null differ diff --git a/api/0.1.3/en/includes/nn b/api/0.1.3/en/includes/nn deleted file mode 100644 index b4a25ffc2333..000000000000 --- a/api/0.1.3/en/includes/nn +++ /dev/null @@ -1,1202 +0,0 @@ -

torch.nn

- -

BatchNorm2d

- -

Applies Batch Normalization over a 4d input that is seen as a mini-batch of 3d inputs

-
              x - mean(x)
-y =  ----------------------------- * gamma + beta
-      standard_deviation(x) + eps
-
-
# With Learnable Parameters
-m = nn.BatchNorm2d(100)
-# Without Learnable Parameters
-m = nn.BatchNorm2d(100, affine=False)
-input = autograd.Variable(torch.randn(20, 100, 35, 45))
-output = m.forward(input)
-
- -

The mean and standard-deviation are calculated per-dimension over -the mini-batches and gamma and beta are learnable parameter vectors -of size N (where N is the input size).

- -

During training, this layer keeps a running estimate of its computed mean -and variance. The running sum is kept with a default momentum of 0.1 -During evaluation, this running mean/variance is used for normalization.

- -

Constructor Arguments

- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ParameterDefaultDescription
num_featuresnum_features from an expected input of size batch_size x num_features x height x width
eps1e-5a value added to the denominator for numerical stability.
momentum0.1the value used for the running_mean and running_var computation.
affinea boolean value that when set to true, gives the layer learnable affine parameters.
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
input[ * , num_features , *, * ]4D Tensor of batch_size x num_features x height x width
outputSameOutput has the same shape as input
- -

Returns

- -

a normalized tensor in the batch dimension

- -

Container

- -

This is the base container class for all neural networks you would define.

-
# Example of using Container
- class Net(nn.Container):
-    def __init__(self):
-        super(Net, self).__init__(
-            conv1 = nn.Conv2d(1, 20, 5),
-            relu  = nn.ReLU()
-         )
-    def __call__(self, input):
-        output = self.relu(self.conv1(x))
-        return output
- model = Net()
-
-
# one can add modules to the container after construction
-model.add_module('pool1', nn.MaxPool2d(2, 2)
-
- -

You will subclass your container from this class. -In the constructor you define the modules that you would want to use, -and in the call function you use the constructed modules in -your operations.

- -

To make it easier to understand, given is a small example.

- -

One can also add new modules to a container after construction. -You can do this with the add_module function.

- -

The container has one additional method parameters() which -returns the list of learnable parameters in the container instance.

- -

LogSoftmax

- -

Applies the Log(Softmax(x)) function to an n-dimensional input Tensor.

-
m = nn.LogSoftmax()
-input = autograd.Variable(torch.randn(2, 3))
-print(input)
-print(m.forward(input))
-
- -

The LogSoftmax formulation can be simplified as - f_i(x) = log(1 / a * exp(x_i)) where a = sum_j exp(x_j) .

- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
input[ * , * ]2D Tensor of any size
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input with - values in the range [-inf, 0)

- -

- -

ReLU

- -

Applies the rectified linear unit function element-wise ReLU(x)= max(0,x)

-
m = nn.ReLU()
-input = autograd.Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

Constructor Arguments

- - - - - - - - - - - - - -
ParameterDefaultDescription
inplacecan optionally do the operation in-place
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input

- -

- -

LogSigmoid

- -

Applies element-wise LogSigmoid(x) = log( 1 / (1 + exp(-x_i)))

-
m = nn.LogSigmoid()
-input = autograd.Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input

- -

- -

PReLU

- -

Applies element-wise the function PReLU(x) = max(0,x) + a * min(0,x)

-
m = nn.PReLU()
-input = autograd.Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

Here “a” is a learnable parameter. -When called without arguments, nn.PReLU() uses a single parameter “a” -across all input channels. If called with nn.PReLU(nChannels), a separate -“a” is used for each input channel. -Note that weight decay should not be used when learning “a” for good -performance.

- -

Constructor Arguments

- - - - - - - - - - - - - - - - - - -
ParameterDefaultDescription
num_parameters1number of “a” to learn.
init0.25the initial value of “a”.
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input

- -

- -

Softmax2d

- -

Applies SoftMax over features to each spatial location

-
m = nn.Softmax2d()
-# you softmax over the 2nd dimension
-input = autograd.Variable(torch.randn(2, 3, 12, 13))
-print(input)
-print(m.forward(input))
-
- -

When given an image of Channels x Height x Width, it will -apply Softmax to each location [Channels, h_i, w_j]

- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
input[ * , * , * , * ]4D Tensor of any size
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input with - values in the range [0, 1]

- -

ReLU6

- -

Applies the element-wise function ReLU6(x) = min( max(0,x), 6)

-
m = nn.ReLU6()
-input = autograd.Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

Constructor Arguments

- - - - - - - - - - - - - -
ParameterDefaultDescription
inplacecan optionally do the operation in-place
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input

- -

- -

BatchNorm3d

- -

Applies Batch Normalization over a 5d input that is seen as a mini-batch of 4d inputs

-
              x - mean(x)
-y =  ----------------------------- * gamma + beta
-      standard_deviation(x) + eps
-
-
# With Learnable Parameters
-m = nn.BatchNorm3d(100)
-# Without Learnable Parameters
-m = nn.BatchNorm3d(100, affine=False)
-input = autograd.Variable(torch.randn(20, 100, 35, 45, 10))
-output = m.forward(input)
-
- -

The mean and standard-deviation are calculated per-dimension over -the mini-batches and gamma and beta are learnable parameter vectors -of size N (where N is the input size).

- -

During training, this layer keeps a running estimate of its computed mean -and variance. The running sum is kept with a default momentum of 0.1 -During evaluation, this running mean/variance is used for normalization.

- -

Constructor Arguments

- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ParameterDefaultDescription
num_featuresnum_features from an expected input of size batch_size x num_features x height x width
eps1e-5a value added to the denominator for numerical stability.
momentum0.1the value used for the running_mean and running_var computation.
affinea boolean value that when set to true, gives the layer learnable affine parameters.
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
input[ * , num_features , * , * , * ]5D Tensor of batch_size x num_features x depth x height x width
outputSameOutput has the same shape as input
- -

Returns

- -

a normalized tensor in the batch dimension

- -

Tanh

- -

Applies element-wise, Tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))

-
m = nn.Tanh()
-input = autograd.Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input

- -

- -

Softplus

- -

Applies element-wise SoftPlus(x) = 1/beta * log(1 + exp(beta * x_i))

-
m = nn.Softplus()
-input = autograd.Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

SoftPlus is a smooth approximation to the ReLU function and can be used -to constrain the output of a machine to always be positive. -For numerical stability the implementation reverts to the linear function -for inputs above a certain value.

- -

Constructor Arguments

- - - - - - - - - - - - - - - - - - -
ParameterDefaultDescription
beta1the beta value for the Softplus formulation.
threshold20values above this revert to a linear function.
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input

- -

- -

Threshold

- -

Thresholds each element of the input Tensor

-
m = nn.Threshold(0.1, 20)
-input = Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

Threshold is defined as: - y = x if x >= threshold - value if x < threshold

- -

Constructor Arguments

- - - - - - - - - - - - - - - - - - - - - - - -
ParameterDefaultDescription
thresholdThe value to threshold at
valueThe value to replace with
inplacecan optionally do the operation in-place
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

Tensor of same dimension and shape as the input

- -

Softmin

- -

Applies the Softmin function to an n-dimensional input Tensor

-
m = nn.Softmin()
-input = autograd.Variable(torch.randn(2, 3))
-print(input)
-print(m.forward(input))
-
- -

rescaling them so that the elements of the n-dimensional output Tensor -lie in the range (0,1) and sum to 1 -Softmin(x) = exp(-x_i - shift) / sum_j exp(-x_j - shift) - where shift = max_i - x_i

- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
input[ * , * ]2D Tensor of any size
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input, with - values in the range [0, 1]

- -

- -

Softshrink

- -

Applies the soft shrinkage function elementwise

-
m = nn.Softshrink()
-input = autograd.Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

SoftShrinkage operator is defined as: - f(x) = x-lambda, if x > lambda > f(x) = x+lambda, if x < -lambda - f(x) = 0, otherwise

- -

Constructor Arguments

- - - - - - - - - - - - - -
ParameterDefaultDescription
lambd0.5the lambda value for the Softshrink formulation.
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input

- -

- -

BatchNorm1d

- -

Applies Batch Normalization over a 2d input that is seen as a mini-batch of 1d inputs

-
              x - mean(x)
-y =  ----------------------------- * gamma + beta
-      standard_deviation(x) + eps
-
-
# With Learnable Parameters
-m = nn.BatchNorm1d(100)
-# Without Learnable Parameters
-m = nn.BatchNorm1d(100, affine=False)
-input = autograd.Variable(torch.randn(20, 100))
-output = m.forward(input)
-
- -

The mean and standard-deviation are calculated per-dimension over -the mini-batches and gamma and beta are learnable parameter vectors -of size N (where N is the input size).

- -

During training, this layer keeps a running estimate of its computed mean -and variance. The running sum is kept with a default momentum of 0.1 -During evaluation, this running mean/variance is used for normalization.

- -

Constructor Arguments

- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ParameterDefaultDescription
num_featuresthe size of each 1D input in the mini-batch
eps1e-5a value added to the denominator for numerical stability.
momentum0.1the value used for the running_mean and running_var computation.
affinea boolean value that when set to true, gives the layer learnable affine parameters.
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
input[ * , num_features ]2D Tensor of nBatches x num_features
outputSameOutput has the same shape as input
- -

Returns

- -

a normalized tensor in the batch dimension

- -

ELU

- -

Applies element-wise, ELU(x) = max(0,x) + min(0, alpha * (exp(x) - 1))

-
m = nn.ELU()
-input = autograd.Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

Constructor Arguments

- - - - - - - - - - - - - - - - - - -
ParameterDefaultDescription
alpha1.0the alpha value for the ELU formulation.
inplacecan optionally do the operation in-place
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input

- -

- -

Hardshrink

- -

Applies the hard shrinkage function element-wise

-
m = nn.Hardshrink()
-input = autograd.Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

Hardshrink is defined as f(x) = x, if x > lambda - f(x) = x, if x < -lambda - f(x) = 0, otherwise

- -

Constructor Arguments

- - - - - - - - - - - - - -
ParameterDefaultDescription
lambd0.5the lambda value for the Hardshrink formulation.
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input

- -

- -

Hardtanh

- -

Applies the HardTanh function element-wise

-
m = nn.HardTanh(-2, 2)
-input = autograd.Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

HardTanh is defined as: - f(x) = +1, if x > 1 - f(x) = -1, if x < -1 - f(x) = x, otherwise -The range of the linear region [-1, 1] can be adjusted

- -

Constructor Arguments

- - - - - - - - - - - - - - - - - - - - - - - -
ParameterDefaultDescription
min_valueminimum value of the linear region range
max_valuemaximum value of the linear region range
inplacecan optionally do the operation in-place
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input

- -

- -

Softsign

- -

Applies element-wise, the function Softsign(x) = x / (1 + |x|)

-
m = nn.Softsign()
-input = autograd.Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input

- -

- -

LeakyReLU

- -

Applies element-wise, f(x) = max(0, x) + negative_slope * min(0, x)

-
m = nn.LeakyReLU(0.1)
-input = autograd.Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

Constructor Arguments

- - - - - - - - - - - - - - - - - - -
ParameterDefaultDescription
negative_slope1e-2Controls the angle of the negative slope.
inplacecan optionally do the operation in-place
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input

- -

Sigmoid

- -

Applies the element-wise function sigmoid(x) = 1 / ( 1 + exp(-x))

-
m = nn.Sigmoid()
-input = autograd.Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input

- -

- -

Tanhshrink

- -

Applies element-wise, Tanhshrink(x) = x - Tanh(x)

-
m = nn.Tanhshrink()
-input = autograd.Variable(torch.randn(2))
-print(input)
-print(m.forward(input))
-
- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
inputAnyTensor of any size and dimension
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input

- -

Softmax

- -

Applies the Softmax function to an n-dimensional input Tensor

-
m = nn.Softmax()
-input = autograd.Variable(torch.randn(2, 3))
-print(input)
-print(m.forward(input))
-
- -

rescaling them so that the elements of the n-dimensional output Tensor -lie in the range (0,1) and sum to 1

- -

Softmax is defined as f_i(x) = exp(x_i - shift) / sum_j exp(x_j - shift) - where shift = max_i x_i

- -

Expected Shape

- - - - - - - - - - - - - - - - - - -
ShapeDescription
input[ * , * ]2D Tensor of any size
outputSameOutput has the same shape as input
- -

Returns

- -

a Tensor of the same dimension and shape as the input with - values in the range [0, 1]

- -

-Notes: - Note that this module doesn’t work directly with NLLLoss, - which expects the Log to be computed between the Softmax and itself. - Use Logsoftmax instead (it’s faster).

diff --git a/api/0.1.3/en/index.html b/api/0.1.3/en/index.html deleted file mode 100644 index 48b37bd8e6e9..000000000000 --- a/api/0.1.3/en/index.html +++ /dev/null @@ -1,1333 +0,0 @@ - - - - - - - PyTorch API Reference - - - - - - - - - - - NAV - - - -
- -
-
- -
    -
    -
    - -
    -
    -
    -
    -

    Introduction

    - -

    Welcome to the PyTorch API Reference. -Here you will find reference documentation for the built-in PyTorch packages.

    - -

    torch.nn

    - -

    BatchNorm2d

    - -

    Applies Batch Normalization over a 4d input that is seen as a mini-batch of 3d inputs

    -
                  x - mean(x)
    -y =  ----------------------------- * gamma + beta
    -      standard_deviation(x) + eps
    -
    -
    # With Learnable Parameters
    -m = nn.BatchNorm2d(100)
    -# Without Learnable Parameters
    -m = nn.BatchNorm2d(100, affine=False)
    -input = autograd.Variable(torch.randn(20, 100, 35, 45))
    -output = m.forward(input)
    -
    - -

    The mean and standard-deviation are calculated per-dimension over -the mini-batches and gamma and beta are learnable parameter vectors -of size N (where N is the input size).

    - -

    During training, this layer keeps a running estimate of its computed mean -and variance. The running sum is kept with a default momentum of 0.1 -During evaluation, this running mean/variance is used for normalization.

    - -

    Constructor Arguments

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ParameterDefaultDescription
    num_featuresnum_features from an expected input of size batch_size x num_features x height x width
    eps1e-5a value added to the denominator for numerical stability.
    momentum0.1the value used for the running_mean and running_var computation.
    affinea boolean value that when set to true, gives the layer learnable affine parameters.
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    input[ * , num_features , *, * ]4D Tensor of batch_size x num_features x height x width
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a normalized tensor in the batch dimension

    - -

    Container

    - -

    This is the base container class for all neural networks you would define.

    -
    # Example of using Container
    - class Net(nn.Container):
    -    def __init__(self):
    -        super(Net, self).__init__(
    -            conv1 = nn.Conv2d(1, 20, 5),
    -            relu  = nn.ReLU()
    -         )
    -    def __call__(self, input):
    -        output = self.relu(self.conv1(x))
    -        return output
    - model = Net()
    -
    -
    # one can add modules to the container after construction
    -model.add_module('pool1', nn.MaxPool2d(2, 2)
    -
    - -

    You will subclass your container from this class. -In the constructor you define the modules that you would want to use, -and in the call function you use the constructed modules in -your operations.

    - -

    To make it easier to understand, given is a small example.

    - -

    One can also add new modules to a container after construction. -You can do this with the add_module function.

    - -

    The container has one additional method parameters() which -returns the list of learnable parameters in the container instance.

    - -

    LogSoftmax

    - -

    Applies the Log(Softmax(x)) function to an n-dimensional input Tensor.

    -
    m = nn.LogSoftmax()
    -input = autograd.Variable(torch.randn(2, 3))
    -print(input)
    -print(m.forward(input))
    -
    - -

    The LogSoftmax formulation can be simplified as - f_i(x) = log(1 / a * exp(x_i)) where a = sum_j exp(x_j) .

    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    input[ * , * ]2D Tensor of any size
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input with - values in the range [-inf, 0)

    - -

    - -

    ReLU

    - -

    Applies the rectified linear unit function element-wise ReLU(x)= max(0,x)

    -
    m = nn.ReLU()
    -input = autograd.Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    Constructor Arguments

    - - - - - - - - - - - - - -
    ParameterDefaultDescription
    inplacecan optionally do the operation in-place
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input

    - -

    - -

    LogSigmoid

    - -

    Applies element-wise LogSigmoid(x) = log( 1 / (1 + exp(-x_i)))

    -
    m = nn.LogSigmoid()
    -input = autograd.Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input

    - -

    - -

    PReLU

    - -

    Applies element-wise the function PReLU(x) = max(0,x) + a * min(0,x)

    -
    m = nn.PReLU()
    -input = autograd.Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    Here “a” is a learnable parameter. -When called without arguments, nn.PReLU() uses a single parameter “a” -across all input channels. If called with nn.PReLU(nChannels), a separate -“a” is used for each input channel. -Note that weight decay should not be used when learning “a” for good -performance.

    - -

    Constructor Arguments

    - - - - - - - - - - - - - - - - - - -
    ParameterDefaultDescription
    num_parameters1number of “a” to learn.
    init0.25the initial value of “a”.
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input

    - -

    - -

    Softmax2d

    - -

    Applies SoftMax over features to each spatial location

    -
    m = nn.Softmax2d()
    -# you softmax over the 2nd dimension
    -input = autograd.Variable(torch.randn(2, 3, 12, 13))
    -print(input)
    -print(m.forward(input))
    -
    - -

    When given an image of Channels x Height x Width, it will -apply Softmax to each location [Channels, h_i, w_j]

    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    input[ * , * , * , * ]4D Tensor of any size
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input with - values in the range [0, 1]

    - -

    ReLU6

    - -

    Applies the element-wise function ReLU6(x) = min( max(0,x), 6)

    -
    m = nn.ReLU6()
    -input = autograd.Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    Constructor Arguments

    - - - - - - - - - - - - - -
    ParameterDefaultDescription
    inplacecan optionally do the operation in-place
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input

    - -

    - -

    BatchNorm3d

    - -

    Applies Batch Normalization over a 5d input that is seen as a mini-batch of 4d inputs

    -
                  x - mean(x)
    -y =  ----------------------------- * gamma + beta
    -      standard_deviation(x) + eps
    -
    -
    # With Learnable Parameters
    -m = nn.BatchNorm3d(100)
    -# Without Learnable Parameters
    -m = nn.BatchNorm3d(100, affine=False)
    -input = autograd.Variable(torch.randn(20, 100, 35, 45, 10))
    -output = m.forward(input)
    -
    - -

    The mean and standard-deviation are calculated per-dimension over -the mini-batches and gamma and beta are learnable parameter vectors -of size N (where N is the input size).

    - -

    During training, this layer keeps a running estimate of its computed mean -and variance. The running sum is kept with a default momentum of 0.1 -During evaluation, this running mean/variance is used for normalization.

    - -

    Constructor Arguments

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ParameterDefaultDescription
    num_featuresnum_features from an expected input of size batch_size x num_features x height x width
    eps1e-5a value added to the denominator for numerical stability.
    momentum0.1the value used for the running_mean and running_var computation.
    affinea boolean value that when set to true, gives the layer learnable affine parameters.
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    input[ * , num_features , * , * , * ]5D Tensor of batch_size x num_features x depth x height x width
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a normalized tensor in the batch dimension

    - -

    Tanh

    - -

    Applies element-wise, Tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))

    -
    m = nn.Tanh()
    -input = autograd.Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input

    - -

    - -

    Softplus

    - -

    Applies element-wise SoftPlus(x) = 1/beta * log(1 + exp(beta * x_i))

    -
    m = nn.Softplus()
    -input = autograd.Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    SoftPlus is a smooth approximation to the ReLU function and can be used -to constrain the output of a machine to always be positive. -For numerical stability the implementation reverts to the linear function -for inputs above a certain value.

    - -

    Constructor Arguments

    - - - - - - - - - - - - - - - - - - -
    ParameterDefaultDescription
    beta1the beta value for the Softplus formulation.
    threshold20values above this revert to a linear function.
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input

    - -

    - -

    Threshold

    - -

    Thresholds each element of the input Tensor

    -
    m = nn.Threshold(0.1, 20)
    -input = Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    Threshold is defined as: - y = x if x >= threshold - value if x < threshold

    - -

    Constructor Arguments

    - - - - - - - - - - - - - - - - - - - - - - - -
    ParameterDefaultDescription
    thresholdThe value to threshold at
    valueThe value to replace with
    inplacecan optionally do the operation in-place
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    Tensor of same dimension and shape as the input

    - -

    Softmin

    - -

    Applies the Softmin function to an n-dimensional input Tensor

    -
    m = nn.Softmin()
    -input = autograd.Variable(torch.randn(2, 3))
    -print(input)
    -print(m.forward(input))
    -
    - -

    rescaling them so that the elements of the n-dimensional output Tensor -lie in the range (0,1) and sum to 1 -Softmin(x) = exp(-x_i - shift) / sum_j exp(-x_j - shift) - where shift = max_i - x_i

    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    input[ * , * ]2D Tensor of any size
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input, with - values in the range [0, 1]

    - -

    - -

    Softshrink

    - -

    Applies the soft shrinkage function elementwise

    -
    m = nn.Softshrink()
    -input = autograd.Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    SoftShrinkage operator is defined as: - f(x) = x-lambda, if x > lambda > f(x) = x+lambda, if x < -lambda - f(x) = 0, otherwise

    - -

    Constructor Arguments

    - - - - - - - - - - - - - -
    ParameterDefaultDescription
    lambd0.5the lambda value for the Softshrink formulation.
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input

    - -

    - -

    BatchNorm1d

    - -

    Applies Batch Normalization over a 2d input that is seen as a mini-batch of 1d inputs

    -
                  x - mean(x)
    -y =  ----------------------------- * gamma + beta
    -      standard_deviation(x) + eps
    -
    -
    # With Learnable Parameters
    -m = nn.BatchNorm1d(100)
    -# Without Learnable Parameters
    -m = nn.BatchNorm1d(100, affine=False)
    -input = autograd.Variable(torch.randn(20, 100))
    -output = m.forward(input)
    -
    - -

    The mean and standard-deviation are calculated per-dimension over -the mini-batches and gamma and beta are learnable parameter vectors -of size N (where N is the input size).

    - -

    During training, this layer keeps a running estimate of its computed mean -and variance. The running sum is kept with a default momentum of 0.1 -During evaluation, this running mean/variance is used for normalization.

    - -

    Constructor Arguments

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ParameterDefaultDescription
    num_featuresthe size of each 1D input in the mini-batch
    eps1e-5a value added to the denominator for numerical stability.
    momentum0.1the value used for the running_mean and running_var computation.
    affinea boolean value that when set to true, gives the layer learnable affine parameters.
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    input[ * , num_features ]2D Tensor of nBatches x num_features
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a normalized tensor in the batch dimension

    - -

    ELU

    - -

    Applies element-wise, ELU(x) = max(0,x) + min(0, alpha * (exp(x) - 1))

    -
    m = nn.ELU()
    -input = autograd.Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    Constructor Arguments

    - - - - - - - - - - - - - - - - - - -
    ParameterDefaultDescription
    alpha1.0the alpha value for the ELU formulation.
    inplacecan optionally do the operation in-place
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input

    - -

    - -

    Hardshrink

    - -

    Applies the hard shrinkage function element-wise

    -
    m = nn.Hardshrink()
    -input = autograd.Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    Hardshrink is defined as f(x) = x, if x > lambda - f(x) = x, if x < -lambda - f(x) = 0, otherwise

    - -

    Constructor Arguments

    - - - - - - - - - - - - - -
    ParameterDefaultDescription
    lambd0.5the lambda value for the Hardshrink formulation.
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input

    - -

    - -

    Hardtanh

    - -

    Applies the HardTanh function element-wise

    -
    m = nn.HardTanh(-2, 2)
    -input = autograd.Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    HardTanh is defined as: - f(x) = +1, if x > 1 - f(x) = -1, if x < -1 - f(x) = x, otherwise -The range of the linear region [-1, 1] can be adjusted

    - -

    Constructor Arguments

    - - - - - - - - - - - - - - - - - - - - - - - -
    ParameterDefaultDescription
    min_valueminimum value of the linear region range
    max_valuemaximum value of the linear region range
    inplacecan optionally do the operation in-place
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input

    - -

    - -

    Softsign

    - -

    Applies element-wise, the function Softsign(x) = x / (1 + |x|)

    -
    m = nn.Softsign()
    -input = autograd.Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input

    - -

    - -

    LeakyReLU

    - -

    Applies element-wise, f(x) = max(0, x) + negative_slope * min(0, x)

    -
    m = nn.LeakyReLU(0.1)
    -input = autograd.Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    Constructor Arguments

    - - - - - - - - - - - - - - - - - - -
    ParameterDefaultDescription
    negative_slope1e-2Controls the angle of the negative slope.
    inplacecan optionally do the operation in-place
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input

    - -

    Sigmoid

    - -

    Applies the element-wise function sigmoid(x) = 1 / ( 1 + exp(-x))

    -
    m = nn.Sigmoid()
    -input = autograd.Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input

    - -

    - -

    Tanhshrink

    - -

    Applies element-wise, Tanhshrink(x) = x - Tanh(x)

    -
    m = nn.Tanhshrink()
    -input = autograd.Variable(torch.randn(2))
    -print(input)
    -print(m.forward(input))
    -
    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    inputAnyTensor of any size and dimension
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input

    - -

    Softmax

    - -

    Applies the Softmax function to an n-dimensional input Tensor

    -
    m = nn.Softmax()
    -input = autograd.Variable(torch.randn(2, 3))
    -print(input)
    -print(m.forward(input))
    -
    - -

    rescaling them so that the elements of the n-dimensional output Tensor -lie in the range (0,1) and sum to 1

    - -

    Softmax is defined as f_i(x) = exp(x_i - shift) / sum_j exp(x_j - shift) - where shift = max_i x_i

    - -

    Expected Shape

    - - - - - - - - - - - - - - - - - - -
    ShapeDescription
    input[ * , * ]2D Tensor of any size
    outputSameOutput has the same shape as input
    - -

    Returns

    - -

    a Tensor of the same dimension and shape as the input with - values in the range [0, 1]

    - -

    -Notes: - Note that this module doesn’t work directly with NLLLoss, - which expects the Log to be computed between the Softmax and itself. - Use Logsoftmax instead (it’s faster).

    - -
    -
    -
    -
    -
    -
    - - diff --git a/api/0.1.3/en/javascripts/all.js b/api/0.1.3/en/javascripts/all.js deleted file mode 100644 index fa3210b0fbfc..000000000000 --- a/api/0.1.3/en/javascripts/all.js +++ /dev/null @@ -1,168 +0,0 @@ -!function(){if("ontouchstart"in window){var e,t,n,i,r,o,s={};e=function(e,t){return Math.abs(e[0]-t[0])>5||Math.abs(e[1]-t[1])>5},t=function(e){this.startXY=[e.touches[0].clientX,e.touches[0].clientY],this.threshold=!1},n=function(t){return this.threshold?!1:void(this.threshold=e(this.startXY,[t.touches[0].clientX,t.touches[0].clientY]))},i=function(t){if(!this.threshold&&!e(this.startXY,[t.changedTouches[0].clientX,t.changedTouches[0].clientY])){var n=t.changedTouches[0],i=document.createEvent("MouseEvents");i.initMouseEvent("click",!0,!0,window,0,n.screenX,n.screenY,n.clientX,n.clientY,!1,!1,!1,!1,0,null),i.simulated=!0,t.target.dispatchEvent(i)}},r=function(e){var t=Date.now(),n=t-s.time,i=e.clientX,r=e.clientY,a=[Math.abs(s.x-i),Math.abs(s.y-r)],u=o(e.target,"A")||e.target,l=u.nodeName,c="A"===l,f=window.navigator.standalone&&c&&e.target.getAttribute("href");return s.time=t,s.x=i,s.y=r,(!e.simulated&&(500>n||1500>n&&a[0]<50&&a[1]<50)||f)&&(e.preventDefault(),e.stopPropagation(),!f)?!1:(f&&(window.location=u.getAttribute("href")),void(u&&u.classList&&(u.classList.add("energize-focus"),window.setTimeout(function(){u.classList.remove("energize-focus")},150))))},o=function(e,t){for(var n=e;n!==document.body;){if(!n||n.nodeName===t)return n;n=n.parentNode}return null},document.addEventListener("touchstart",t,!1),document.addEventListener("touchmove",n,!1),document.addEventListener("touchend",i,!1),document.addEventListener("click",r,!0)}}(),/*! 
- * jQuery JavaScript Library v2.2.0 - * http://jquery.com/ - * - * Includes Sizzle.js - * http://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2016-01-08T20:02Z - */ -function(e,t){"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(e,t){function n(e){var t=!!e&&"length"in e&&e.length,n=oe.type(e);return"function"===n||oe.isWindow(e)?!1:"array"===n||0===t||"number"==typeof t&&t>0&&t-1 in e}function i(e,t,n){if(oe.isFunction(t))return oe.grep(e,function(e,i){return!!t.call(e,i,e)!==n});if(t.nodeType)return oe.grep(e,function(e){return e===t!==n});if("string"==typeof t){if(ge.test(t))return oe.filter(t,e,n);t=oe.filter(t,e)}return oe.grep(e,function(e){return Z.call(t,e)>-1!==n})}function r(e,t){for(;(e=e[t])&&1!==e.nodeType;);return e}function o(e){var t={};return oe.each(e.match(we)||[],function(e,n){t[n]=!0}),t}function s(){Q.removeEventListener("DOMContentLoaded",s),e.removeEventListener("load",s),oe.ready()}function a(){this.expando=oe.expando+a.uid++}function u(e,t,n){var i;if(void 0===n&&1===e.nodeType)if(i="data-"+t.replace(Ae,"-$&").toLowerCase(),n=e.getAttribute(i),"string"==typeof n){try{n="true"===n?!0:"false"===n?!1:"null"===n?null:+n+""===n?+n:Ne.test(n)?oe.parseJSON(n):n}catch(r){}ke.set(e,t,n)}else n=void 0;return n}function l(e,t,n,i){var r,o=1,s=20,a=i?function(){return i.cur()}:function(){return oe.css(e,t,"")},u=a(),l=n&&n[3]||(oe.cssNumber[t]?"":"px"),c=(oe.cssNumber[t]||"px"!==l&&+u)&&Le.exec(oe.css(e,t));if(c&&c[3]!==l){l=l||c[3],n=n||[],c=+u||1;do o=o||".5",c/=o,oe.style(e,t,c+l);while(o!==(o=a()/u)&&1!==o&&--s)}return n&&(c=+c||+u||0,r=n[1]?c+(n[1]+1)*n[2]:+n[2],i&&(i.unit=l,i.start=c,i.end=r)),r}function c(e,t){var n="undefined"!=typeof 
e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[];return void 0===t||t&&oe.nodeName(e,t)?oe.merge([e],n):n}function f(e,t){for(var n=0,i=e.length;i>n;n++)Ee.set(e[n],"globalEval",!t||Ee.get(t[n],"globalEval"))}function d(e,t,n,i,r){for(var o,s,a,u,l,d,h=t.createDocumentFragment(),p=[],g=0,m=e.length;m>g;g++)if(o=e[g],o||0===o)if("object"===oe.type(o))oe.merge(p,o.nodeType?[o]:o);else if(Fe.test(o)){for(s=s||h.appendChild(t.createElement("div")),a=(qe.exec(o)||["",""])[1].toLowerCase(),u=Pe[a]||Pe._default,s.innerHTML=u[1]+oe.htmlPrefilter(o)+u[2],d=u[0];d--;)s=s.lastChild;oe.merge(p,s.childNodes),s=h.firstChild,s.textContent=""}else p.push(t.createTextNode(o));for(h.textContent="",g=0;o=p[g++];)if(i&&oe.inArray(o,i)>-1)r&&r.push(o);else if(l=oe.contains(o.ownerDocument,o),s=c(h.appendChild(o),"script"),l&&f(s),n)for(d=0;o=s[d++];)He.test(o.type||"")&&n.push(o);return h}function h(){return!0}function p(){return!1}function g(){try{return Q.activeElement}catch(e){}}function m(e,t,n,i,r,o){var s,a;if("object"==typeof t){"string"!=typeof n&&(i=i||n,n=void 0);for(a in t)m(e,a,n,i,t[a],o);return e}if(null==i&&null==r?(r=n,i=n=void 0):null==r&&("string"==typeof n?(r=i,i=void 0):(r=i,i=n,n=void 0)),r===!1)r=p;else if(!r)return this;return 1===o&&(s=r,r=function(e){return oe().off(e),s.apply(this,arguments)},r.guid=s.guid||(s.guid=oe.guid++)),e.each(function(){oe.event.add(this,t,r,i,n)})}function v(e,t){return oe.nodeName(e,"table")&&oe.nodeName(11!==t.nodeType?t:t.firstChild,"tr")?e.getElementsByTagName("tbody")[0]||e:e}function y(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function x(e){var t=ze.exec(e.type);return t?e.type=t[1]:e.removeAttribute("type"),e}function b(e,t){var n,i,r,o,s,a,u,l;if(1===t.nodeType){if(Ee.hasData(e)&&(o=Ee.access(e),s=Ee.set(t,o),l=o.events)){delete s.handle,s.events={};for(r in 
l)for(n=0,i=l[r].length;i>n;n++)oe.event.add(t,r,l[r][n])}ke.hasData(e)&&(a=ke.access(e),u=oe.extend({},a),ke.set(t,u))}}function w(e,t){var n=t.nodeName.toLowerCase();"input"===n&&_e.test(e.type)?t.checked=e.checked:("input"===n||"textarea"===n)&&(t.defaultValue=e.defaultValue)}function C(e,t,n,i){t=G.apply([],t);var r,o,s,a,u,l,f=0,h=e.length,p=h-1,g=t[0],m=oe.isFunction(g);if(m||h>1&&"string"==typeof g&&!ie.checkClone&&Be.test(g))return e.each(function(r){var o=e.eq(r);m&&(t[0]=g.call(this,r,o.html())),C(o,t,n,i)});if(h&&(r=d(t,e[0].ownerDocument,!1,e,i),o=r.firstChild,1===r.childNodes.length&&(r=o),o||i)){for(s=oe.map(c(r,"script"),y),a=s.length;h>f;f++)u=r,f!==p&&(u=oe.clone(u,!0,!0),a&&oe.merge(s,c(u,"script"))),n.call(e[f],u,f);if(a)for(l=s[s.length-1].ownerDocument,oe.map(s,x),f=0;a>f;f++)u=s[f],He.test(u.type||"")&&!Ee.access(u,"globalEval")&&oe.contains(l,u)&&(u.src?oe._evalUrl&&oe._evalUrl(u.src):oe.globalEval(u.textContent.replace(Ve,"")))}return e}function S(e,t,n){for(var i,r=t?oe.filter(t,e):e,o=0;null!=(i=r[o]);o++)n||1!==i.nodeType||oe.cleanData(c(i)),i.parentNode&&(n&&oe.contains(i.ownerDocument,i)&&f(c(i,"script")),i.parentNode.removeChild(i));return e}function T(e,t){var n=oe(t.createElement(e)).appendTo(t.body),i=oe.css(n[0],"display");return n.detach(),i}function E(e){var t=Q,n=Ue[e];return n||(n=T(e,t),"none"!==n&&n||(Xe=(Xe||oe(" + + +
    +
    +
    + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
    +
    + +
    +
    + + +
    + + + + + + + + +
    + +
    +
    + +
    +
    +

    August 08, 2025

    +

    + Amazon Ads +

    +
    +
    + +
    +
    +
    + +
    +

    + by + + Team PyTorch + +

    +

    Reduce inference costs by 71% and drive scale out using PyTorch, TorchServe, and AWS Inferentia.

    + +
    +
    +
    +
    + + +
    +
    +
    +
    +

    Docs

    +

    Access comprehensive developer documentation for PyTorch

    + View Docs +
    + +
    +

    Tutorials

    +

    Get in-depth tutorials for beginners and advanced developers

    + View Tutorials +
    + +
    +

    Resources

    +

    Find development resources and get your questions answered

    + View Resources +
    +
    +
    +
    + +
    + +
    + +
    +
    +
    +
    + + +
    +
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/case_studies/salesforce.html b/case_studies/salesforce.html new file mode 100644 index 000000000000..bb45b119d7eb --- /dev/null +++ b/case_studies/salesforce.html @@ -0,0 +1,314 @@ + + + + + + + + + + + + + Salesforce | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    +
    + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
    +
    + +
    +
    + + +
    + + + + + + + + +
    + +
    +
    + +
    +
    +

    August 08, 2025

    +

    + Salesforce +

    +
    +
    + +
    +
    +
    + +
    +

    + by + + Team PyTorch + +

    +

    Pushing the state of the art in NLP and Multi-task learning.

    + +
    +
    +
    +
    + + +
    +
    +
    +
    +

    Docs

    +

    Access comprehensive developer documentation for PyTorch

    + View Docs +
    + +
    +

    Tutorials

    +

    Get in-depth tutorials for beginners and advanced developers

    + View Tutorials +
    + +
    +

    Resources

    +

    Find development resources and get your questions answered

    + View Resources +
    +
    +
    +
    + +
    + +
    + +
    +
    +
    +
    + + +
    +
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/case_studies/stanford-university.html b/case_studies/stanford-university.html new file mode 100644 index 000000000000..e5fcf580c621 --- /dev/null +++ b/case_studies/stanford-university.html @@ -0,0 +1,314 @@ + + + + + + + + + + + + + Stanford University | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    +
    + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
    +
    + +
    +
    + + +
    + + + + + + + + +
    + +
    +
    + +
    +
    +

    August 08, 2025

    +

    + Stanford University +

    +
    +
    + +
    +
    +
    + +
    +

    + by + + Team PyTorch + +

    +

    Using PyTorch’s flexibility to efficiently research new algorithmic approaches.

    + +
    +
    +
    +
    + + +
    +
    +
    +
    +

    Docs

    +

    Access comprehensive developer documentation for PyTorch

    + View Docs +
    + +
    +

    Tutorials

    +

    Get in-depth tutorials for beginners and advanced developers

    + View Tutorials +
    + +
    +

    Resources

    +

    Find development resources and get your questions answered

    + View Resources +
    +
    +
    +
    + +
    + +
    + +
    +
    +
    +
    + + +
    +
    +
    + + +
    + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs.html b/docs.html deleted file mode 100644 index 58fe2e65f749..000000000000 --- a/docs.html +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: PyTorch | Documentation -id: docs -permalink: /docs/ -layout: default ---- - - - \ No newline at end of file diff --git a/docs/.buildinfo b/docs/.buildinfo deleted file mode 100644 index 8fad5f785870..000000000000 --- a/docs/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 5c9c098f2b2b63c7722a5b518429df6c -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/_modules/index.html b/docs/_modules/index.html deleted file mode 100644 index 5c9b5eac6720..000000000000 --- a/docs/_modules/index.html +++ /dev/null @@ -1,605 +0,0 @@ - - - - - - - - - - - Overview: module code — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - - - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch.html b/docs/_modules/torch.html deleted file mode 100644 index fb8327bac93c..000000000000 --- a/docs/_modules/torch.html +++ /dev/null @@ -1,896 +0,0 @@ - - - - - - - - - - - torch — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch

    -"""
    -The torch package contains data structures for multi-dimensional
    -tensors and mathematical operations over these are defined.
    -Additionally, it provides many utilities for efficient serializing of
    -Tensors and arbitrary types, and other useful utilities.
    -
    -It has a CUDA counterpart, that enables you to run your tensor computations
    -on an NVIDIA GPU with compute capability >= 2.0.
    -"""
    -
    -import sys
    -from ._utils import _import_dotted_name
    -from .version import __version__
    -
    -__all__ = [
    -    'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
    -    'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed',
    -    'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack',
    -    'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
    -    'ShortStorage', 'CharStorage', 'ByteStorage',
    -    'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
    -    'ShortTensor', 'CharTensor', 'ByteTensor',
    -]
    -
    -################################################################################
    -# Load the extension module
    -################################################################################
    -
    -# Loading the extension with RTLD_GLOBAL option allows to not link extension
    -# modules against the _C shared object. Their missing THP symbols will be
    -# automatically filled by the dynamic loader.
    -import os as _dl_flags
    -
    -# if we have numpy, it *must* be imported before the call to setdlopenflags()
    -# or there is risk that later c modules will segfault when importing numpy
    -try:
    -    import numpy as np
    -except:
    -    pass
    -
    -# first check if the os package has the required flags
    -if not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr(_dl_flags, 'RTLD_NOW'):
    -    try:
    -        # next try if DLFCN exists
    -        import DLFCN as _dl_flags
    -    except ImportError:
    -        # as a last attempt, use compile-time constants
    -        import torch._dl as _dl_flags
    -
    -old_flags = sys.getdlopenflags()
    -sys.setdlopenflags(_dl_flags.RTLD_GLOBAL | _dl_flags.RTLD_NOW)
    -
    -from torch._C import *
    -
    -__all__ += [name for name in dir(_C)
    -            if name[0] != '_' and
    -            not name.endswith('Base')]
    -
    -sys.setdlopenflags(old_flags)
    -del _dl_flags
    -del old_flags
    -
    -################################################################################
    -# Define basic utilities
    -################################################################################
    -
    -
    -def typename(o):
    -    module = ''
    -    class_name = ''
    -    if hasattr(o, '__module__') and o.__module__ != 'builtins' \
    -            and o.__module__ != '__builtin__' and o.__module__ is not None:
    -        module = o.__module__ + '.'
    -
    -    if hasattr(o, '__qualname__'):
    -        class_name = o.__qualname__
    -    elif hasattr(o, '__name__'):
    -        class_name = o.__name__
    -    else:
    -        class_name = o.__class__.__name__
    -
    -    return module + class_name
    -
    -
    -
    [docs]def is_tensor(obj): - r"""Returns True if `obj` is a pytorch tensor. - - Args: - obj (Object): Object to test - """ - return obj.__class__ in _tensor_classes
    - - -
    [docs]def is_storage(obj): - r"""Returns True if `obj` is a pytorch storage object. - - Args: - obj (Object): Object to test - """ - return obj.__class__ in _storage_classes
    - - -
    [docs]def set_default_tensor_type(t): - global Tensor - global Storage - Tensor = _import_dotted_name(t) - Storage = _import_dotted_name(t.replace('Tensor', 'Storage')) - _C._set_default_tensor_type(Tensor)
    - - -
    [docs]def set_rng_state(new_state): - r"""Sets the random number generator state. - - Args: - new_state (torch.ByteTensor): The desired state - """ - default_generator.set_state(new_state)
    - - -
    [docs]def get_rng_state(): - r"""Returns the random number generator state as a ByteTensor.""" - return default_generator.get_state()
    - - -
    [docs]def manual_seed(seed): - r"""Sets the seed for generating random numbers. And returns a - `torch._C.Generator` object. - - Args: - seed (int or long): The desired seed. - """ - return default_generator.manual_seed(seed)
    - - -
    [docs]def initial_seed(): - r"""Returns the initial seed for generating random numbers as a - python `long`. - """ - return default_generator.initial_seed()
    - - -from .serialization import save, load -from ._tensor_str import set_printoptions - -################################################################################ -# Define Storage and Tensor classes -################################################################################ - -from .storage import _StorageBase -from .tensor import _TensorBase - - -class DoubleStorage(_C.DoubleStorageBase, _StorageBase): - pass - - -
    [docs]class FloatStorage(_C.FloatStorageBase, _StorageBase): - pass
    - - -class HalfStorage(_C.HalfStorageBase, _StorageBase): - pass - - -class LongStorage(_C.LongStorageBase, _StorageBase): - pass - - -class IntStorage(_C.IntStorageBase, _StorageBase): - pass - - -class ShortStorage(_C.ShortStorageBase, _StorageBase): - pass - - -class CharStorage(_C.CharStorageBase, _StorageBase): - pass - - -class ByteStorage(_C.ByteStorageBase, _StorageBase): - pass - - -class DoubleTensor(_C.DoubleTensorBase, _TensorBase): - - def is_signed(self): - return True - - @classmethod - def storage_type(cls): - return DoubleStorage - - -class FloatTensor(_C.FloatTensorBase, _TensorBase): - - def is_signed(self): - return True - - @classmethod - def storage_type(cls): - return FloatStorage - - -class HalfTensor(_C.HalfTensorBase, _TensorBase): - - def is_signed(self): - return True - - @classmethod - def storage_type(cls): - return HalfStorage - - -class LongTensor(_C.LongTensorBase, _TensorBase): - - def is_signed(self): - return True - - @classmethod - def storage_type(cls): - return LongStorage - - -class IntTensor(_C.IntTensorBase, _TensorBase): - - def is_signed(self): - return True - - @classmethod - def storage_type(cls): - return IntStorage - - -class ShortTensor(_C.ShortTensorBase, _TensorBase): - - def is_signed(self): - return True - - @classmethod - def storage_type(cls): - return ShortStorage - - -class CharTensor(_C.CharTensorBase, _TensorBase): - - def is_signed(self): - # TODO - return False - - @classmethod - def storage_type(cls): - return CharStorage - - -class ByteTensor(_C.ByteTensorBase, _TensorBase): - - def is_signed(self): - return False - - @classmethod - def storage_type(cls): - return ByteStorage - - -_storage_classes = { - DoubleStorage, FloatStorage, LongStorage, IntStorage, ShortStorage, - CharStorage, ByteStorage, -} - -_tensor_classes = { - DoubleTensor, FloatTensor, LongTensor, IntTensor, ShortTensor, - CharTensor, ByteTensor, -} - - -set_default_tensor_type('torch.FloatTensor') - 
-################################################################################ -# Import interface functions defined in Python -################################################################################ - -from .functional import * - - -################################################################################ -# Initialize extension -################################################################################ - -def manager_path(): - import os - path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'lib', 'torch_shm_manager') - if not os.path.exists(path): - raise RuntimeError("Unable to find torch_shm_manager at " + path) - return path.encode('utf-8') - - -# Shared memory manager needs to know the exact location of manager executable -_C._initExtension(manager_path()) -del manager_path - -################################################################################ -# Remove unnecessary members -################################################################################ - -del DoubleStorageBase -del FloatStorageBase -del LongStorageBase -del IntStorageBase -del ShortStorageBase -del CharStorageBase -del ByteStorageBase -del DoubleTensorBase -del FloatTensorBase -del LongTensorBase -del IntTensorBase -del ShortTensorBase -del CharTensorBase -del ByteTensorBase - -del SparseDoubleTensorBase -del SparseFloatTensorBase -del SparseLongTensorBase -del SparseIntTensorBase -del SparseShortTensorBase -del SparseCharTensorBase -del SparseByteTensorBase - -################################################################################ -# Import most common subpackages -################################################################################ - -import torch.cuda -import torch.autograd -import torch.nn -import torch.optim -import torch.multiprocessing - -# attach docstrings to torch and tensor functions -from . import _torch_docs, _tensor_docs -del _torch_docs, _tensor_docs -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/_tensor_str.html b/docs/_modules/torch/_tensor_str.html deleted file mode 100644 index 0bf3ff40fbb2..000000000000 --- a/docs/_modules/torch/_tensor_str.html +++ /dev/null @@ -1,854 +0,0 @@ - - - - - - - - - - - torch._tensor_str — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch._tensor_str

    -import math
    -import torch
    -from functools import reduce
    -from ._utils import _range
    -
    -
    -class __PrinterOptions(object):
    -    precision = 4
    -    threshold = 1000
    -    edgeitems = 3
    -    linewidth = 80
    -
    -
    -PRINT_OPTS = __PrinterOptions()
    -SCALE_FORMAT = '{:.5e} *\n'
    -
    -
    -# We could use **kwargs, but this will give better docs
    -
    [docs]def set_printoptions( - precision=None, - threshold=None, - edgeitems=None, - linewidth=None, - profile=None, -): - """Set options for printing. Items shamelessly taken from Numpy - - Args: - precision: Number of digits of precision for floating point output - (default 8). - threshold: Total number of array elements which trigger summarization - rather than full repr (default 1000). - edgeitems: Number of array items in summary at beginning and end of - each dimension (default 3). - linewidth: The number of characters per line for the purpose of - inserting line breaks (default 80). Thresholded matricies will - ignore this parameter. - profile: Sane defaults for pretty printing. Can override with any of - the above options. (default, short, full) - """ - if profile is not None: - if profile == "default": - PRINT_OPTS.precision = 4 - PRINT_OPTS.threshold = 1000 - PRINT_OPTS.edgeitems = 3 - PRINT_OPTS.linewidth = 80 - elif profile == "short": - PRINT_OPTS.precision = 2 - PRINT_OPTS.threshold = 1000 - PRINT_OPTS.edgeitems = 2 - PRINT_OPTS.linewidth = 80 - elif profile == "full": - PRINT_OPTS.precision = 4 - PRINT_OPTS.threshold = float('inf') - PRINT_OPTS.edgeitems = 3 - PRINT_OPTS.linewidth = 80 - - if precision is not None: - PRINT_OPTS.precision = precision - if threshold is not None: - PRINT_OPTS.threshold = threshold - if edgeitems is not None: - PRINT_OPTS.edgeitems = edgeitems - if linewidth is not None: - PRINT_OPTS.linewidth = linewidth
    - - -def _number_format(tensor, min_sz=-1): - min_sz = max(min_sz, 2) - tensor = torch.DoubleTensor(tensor.nelement()).copy_(tensor).abs_() - - pos_inf_mask = tensor.eq(float('inf')) - neg_inf_mask = tensor.eq(float('-inf')) - nan_mask = tensor.ne(tensor) - invalid_value_mask = pos_inf_mask + neg_inf_mask + nan_mask - if invalid_value_mask.all(): - example_value = 0 - else: - example_value = tensor[invalid_value_mask.eq(0)][0] - tensor[invalid_value_mask] = example_value - if invalid_value_mask.any(): - min_sz = max(min_sz, 3) - - int_mode = True - # TODO: use fmod? - for value in tensor: - if value != math.ceil(value): - int_mode = False - break - - exp_min = tensor.min() - if exp_min != 0: - exp_min = math.floor(math.log10(exp_min)) + 1 - else: - exp_min = 1 - exp_max = tensor.max() - if exp_max != 0: - exp_max = math.floor(math.log10(exp_max)) + 1 - else: - exp_max = 1 - - scale = 1 - exp_max = int(exp_max) - prec = PRINT_OPTS.precision - if int_mode: - if exp_max > prec + 1: - format = '{{:11.{}e}}'.format(prec) - sz = max(min_sz, 7 + prec) - else: - sz = max(min_sz, exp_max + 1) - format = '{:' + str(sz) + '.0f}' - else: - if exp_max - exp_min > prec: - sz = 7 + prec - if abs(exp_max) > 99 or abs(exp_min) > 99: - sz = sz + 1 - sz = max(min_sz, sz) - format = '{{:{}.{}e}}'.format(sz, prec) - else: - if exp_max > prec + 1 or exp_max < 0: - sz = max(min_sz, 7) - scale = math.pow(10, exp_max - 1) - else: - if exp_max == 0: - sz = 7 - else: - sz = exp_max + 6 - sz = max(min_sz, sz) - format = '{{:{}.{}f}}'.format(sz, prec) - return format, scale, sz - - -def _tensor_str(self): - n = PRINT_OPTS.edgeitems - has_hdots = self.size()[-1] > 2 * n - has_vdots = self.size()[-2] > 2 * n - print_full_mat = not has_hdots and not has_vdots - formatter = _number_format(self, min_sz=3 if not print_full_mat else 0) - print_dots = self.numel() >= PRINT_OPTS.threshold - - dim_sz = max(2, max(len(str(x)) for x in self.size())) - dim_fmt = "{:^" + str(dim_sz) + "}" - dot_fmt = 
u"{:^" + str(dim_sz + 1) + "}" - - counter_dim = self.ndimension() - 2 - counter = torch.LongStorage(counter_dim).fill_(0) - counter[counter.size() - 1] = -1 - finished = False - strt = '' - while True: - nrestarted = [False for i in counter] - nskipped = [False for i in counter] - for i in _range(counter_dim - 1, -1, -1): - counter[i] += 1 - if print_dots and counter[i] == n and self.size(i) > 2 * n: - counter[i] = self.size(i) - n - nskipped[i] = True - if counter[i] == self.size(i): - if i == 0: - finished = True - counter[i] = 0 - nrestarted[i] = True - else: - break - if finished: - break - elif print_dots: - if any(nskipped): - for hdot in nskipped: - strt += dot_fmt.format('...') if hdot \ - else dot_fmt.format('') - strt += '\n' - if any(nrestarted): - strt += ' ' - for vdot in nrestarted: - strt += dot_fmt.format(u'\u22EE' if vdot else '') - strt += '\n' - if strt != '': - strt += '\n' - strt += '({},.,.) = \n'.format( - ','.join(dim_fmt.format(i) for i in counter)) - submatrix = reduce(lambda t, i: t.select(0, i), counter, self) - strt += _matrix_str(submatrix, ' ', formatter, print_dots) - return strt - - -def __repr_row(row, indent, fmt, scale, sz, truncate=None): - if truncate is not None: - dotfmt = " {:^5} " - return (indent + - ' '.join(fmt.format(val / scale) for val in row[:truncate]) + - dotfmt.format('...') + - ' '.join(fmt.format(val / scale) for val in row[-truncate:]) + - '\n') - else: - return indent + ' '.join(fmt.format(val / scale) for val in row) + '\n' - - -def _matrix_str(self, indent='', formatter=None, force_truncate=False): - n = PRINT_OPTS.edgeitems - has_hdots = self.size(1) > 2 * n - has_vdots = self.size(0) > 2 * n - print_full_mat = not has_hdots and not has_vdots - - if formatter is None: - fmt, scale, sz = _number_format(self, - min_sz=5 if not print_full_mat else 0) - else: - fmt, scale, sz = formatter - nColumnPerLine = int(math.floor((PRINT_OPTS.linewidth - len(indent)) / (sz + 1))) - strt = '' - firstColumn = 0 - - if not 
force_truncate and \ - (self.numel() < PRINT_OPTS.threshold or print_full_mat): - while firstColumn < self.size(1): - lastColumn = min(firstColumn + nColumnPerLine - 1, self.size(1) - 1) - if nColumnPerLine < self.size(1): - strt += '\n' if firstColumn != 1 else '' - strt += 'Columns {} to {} \n{}'.format( - firstColumn, lastColumn, indent) - if scale != 1: - strt += SCALE_FORMAT.format(scale) - for l in _range(self.size(0)): - strt += indent + (' ' if scale != 1 else '') - row_slice = self[l, firstColumn:lastColumn + 1] - strt += ' '.join(fmt.format(val / scale) for val in row_slice) - strt += '\n' - firstColumn = lastColumn + 1 - else: - if scale != 1: - strt += SCALE_FORMAT.format(scale) - if has_vdots and has_hdots: - vdotfmt = "{:^" + str((sz + 1) * n - 1) + "}" - ddotfmt = u"{:^5}" - for row in self[:n]: - strt += __repr_row(row, indent, fmt, scale, sz, n) - strt += indent + ' '.join([vdotfmt.format('...'), - ddotfmt.format(u'\u22F1'), - vdotfmt.format('...')]) + "\n" - for row in self[-n:]: - strt += __repr_row(row, indent, fmt, scale, sz, n) - elif not has_vdots and has_hdots: - for row in self: - strt += __repr_row(row, indent, fmt, scale, sz, n) - elif has_vdots and not has_hdots: - vdotfmt = u"{:^" + \ - str(len(__repr_row(self[0], '', fmt, scale, sz))) + \ - "}\n" - for row in self[:n]: - strt += __repr_row(row, indent, fmt, scale, sz) - strt += vdotfmt.format(u'\u22EE') - for row in self[-n:]: - strt += __repr_row(row, indent, fmt, scale, sz) - else: - for row in self: - strt += __repr_row(row, indent, fmt, scale, sz) - return strt - - -def _vector_str(self): - fmt, scale, sz = _number_format(self) - strt = '' - ident = '' - n = PRINT_OPTS.edgeitems - dotfmt = u"{:^" + str(sz) + "}\n" - if scale != 1: - strt += SCALE_FORMAT.format(scale) - ident = ' ' - if self.numel() < PRINT_OPTS.threshold: - return (strt + - '\n'.join(ident + fmt.format(val / scale) for val in self) + - '\n') - else: - return (strt + - '\n'.join(ident + fmt.format(val / scale) for 
val in self[:n]) + - '\n' + (ident + dotfmt.format(u"\u22EE")) + - '\n'.join(ident + fmt.format(val / scale) for val in self[-n:]) + - '\n') - - -def _str(self): - if self.ndimension() == 0: - return '[{} with no dimension]\n'.format(torch.typename(self)) - elif self.ndimension() == 1: - strt = _vector_str(self) - elif self.ndimension() == 2: - strt = _matrix_str(self) - else: - strt = _tensor_str(self) - - size_str = 'x'.join(str(size) for size in self.size()) - device_str = '' if not self.is_cuda else \ - ' (GPU {})'.format(self.get_device()) - strt += '[{} of size {}{}]\n'.format(torch.typename(self), - size_str, device_str) - return '\n' + strt -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/_utils.html b/docs/_modules/torch/_utils.html deleted file mode 100644 index 5c0758bf68fe..000000000000 --- a/docs/_modules/torch/_utils.html +++ /dev/null @@ -1,657 +0,0 @@ - - - - - - - - - - - torch._utils — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch._utils

    -import torch
    -import importlib
    -
    -
    -def _type(self, new_type=None, async=False):
    -    """Casts this object to the specified type.
    -
    -    If this is already of the correct type, no copy is performed and the
    -    original object is returned.
    -
    -    Args:
    -        new_type (type or string): The desired type
    -        async (bool): If True, and the source is in pinned memory and
    -                      destination is on the GPU or vice versa, the copy is
    -                      performed asynchronously with respect to the host.
    -                      Otherwise, the argument has no effect.
    -    """
    -    if new_type is None:
    -        return self.__module__ + '.' + self.__class__.__name__
    -
    -    if isinstance(new_type, str):
    -        new_type = _import_dotted_name(new_type)
    -    if new_type == type(self):
    -        return self
    -    if self.is_sparse:
    -        if not new_type.is_sparse:
    -            raise RuntimeError("Cannot cast sparse tensor to dense tensor")
    -        new_type_name = new_type.__module__ + '.' + new_type.__name__
    -        new_values_type_name = new_type_name.replace('.sparse', '')
    -        new_values = self.values().type(new_values_type_name, async)
    -        return new_type(self.indices(), new_values, self.size())
    -    if new_type.is_sparse:
    -        raise RuntimeError("Cannot cast dense tensor to sparse tensor")
    -    return new_type(self.size()).copy_(self, async)
    -
    -
    -def _cuda(self, device=None, async=False):
    -    """Returns a copy of this object in CUDA memory.
    -
    -    If this object is already in CUDA memory and on the correct device, then
    -    no copy is performed and the original object is returned.
    -
    -    Args:
    -        device (int): The destination GPU id. Defaults to the current device.
    -        async (bool): If True and the source is in pinned memory, the copy will
    -                      be asynchronous with respect to the host. Otherwise, the
    -                      argument has no effect.
    -    """
    -    if self.is_cuda:
    -        if device is None:
    -            device = torch.cuda.current_device()
    -        if self.get_device() == device:
    -            return self
    -    else:
    -        if device is None:
    -            device = -1
    -    with torch.cuda.device(device):
    -        if self.is_sparse:
    -            new_type = getattr(torch.cuda.sparse, self.__class__.__name__)
    -            indices = self.indices().cuda(device, async)
    -            values = self.values().cuda(device, async)
    -            return new_type(indices, values, self.size())
    -        else:
    -            new_type = getattr(torch.cuda, self.__class__.__name__)
    -            return new_type(self.size()).copy_(self, async)
    -
    -
    -def _rebuild_tensor(storage, storage_offset, size, stride):
    -    class_name = storage.__class__.__name__.replace('Storage', 'Tensor')
    -    module = importlib.import_module(storage.__module__)
    -    tensor_class = getattr(module, class_name)
    -    return tensor_class().set_(storage, storage_offset, size, stride)
    -
    -
    -def _range(*args, **kwargs):
    -    return __builtins__['range'](*args, **kwargs)
    -
    -
    -def _import_dotted_name(name):
    -    components = name.split('.')
    -    obj = __import__(components[0])
    -    for component in components[1:]:
    -        obj = getattr(obj, component)
    -    return obj
    -
    -
    -# Taken from python 3.5 docs
    -def _accumulate(iterable, fn=lambda x, y: x + y):
    -    'Return running totals'
    -    # _accumulate([1,2,3,4,5]) --> 1 3 6 10 15
    -    # _accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
    -    it = iter(iterable)
    -    try:
    -        total = next(it)
    -    except StopIteration:
    -        return
    -    yield total
    -    for element in it:
    -        total = fn(total, element)
    -        yield total
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/autograd.html b/docs/_modules/torch/autograd.html deleted file mode 100644 index 79fc15795052..000000000000 --- a/docs/_modules/torch/autograd.html +++ /dev/null @@ -1,601 +0,0 @@ - - - - - - - - - - - torch.autograd — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.autograd

    -"""
    -torch.autograd provides classes and functions implementing automatic
    -differentiation of arbitrary scalar valued functions. It requires minimal
    -changes to the existing code - you only need to wrap all tensors in
    -:class:`.Variable` objects.
    -"""
    -import torch
    -
    -from .variable import Variable
    -from .function import Function, NestedIOFunction
    -from .stochastic_function import StochasticFunction
    -from .gradcheck import gradcheck
    -
    -__all__ = ['Variable', 'Function', 'StochasticFunction', 'backward']
    -
    -
    -
    [docs]def backward(variables, grad_variables, retain_variables=False): - """Computes the sum of gradients of given variables w.r.t. graph leaves. - - The graph is differentiated using the chain rule. If any of ``variables`` - are non-scalar (i.e. their data has more than one element) and require - gradient, the function additionaly requires specifying ``grad_variables``. - It should be a sequence of matching length, that containins gradient of - the differentiated function w.r.t. corresponding variables (``None`` is an - acceptable value for all variables that don't need gradient tensors). - - This function accumulates gradients in the leaves - you might need to zero - them before calling it. - - Arguments: - variables (sequence of Variable): Variables of which the derivative will be - computed. - grad_variables (sequence of Tensor): Gradients w.r.t. each element of - corresponding variables. Required only for non-scalar variables that - require gradient. - retain_variables (bool): If ``True``, buffers necessary for computing - gradients won't be freed after use. It is only necessary to - specify ``True`` if you want to differentiate some subgraph multiple - times. - """ - Variable._execution_engine.run_backward( - tuple(variables), tuple(grad_variables), retain_variables)
    - -assert torch._C._autograd_init() -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/autograd/function.html b/docs/_modules/torch/autograd/function.html deleted file mode 100644 index 2d1180866c8f..000000000000 --- a/docs/_modules/torch/autograd/function.html +++ /dev/null @@ -1,807 +0,0 @@ - - - - - - - - - - - torch.autograd.function — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.autograd.function

    -import torch
    -import torch._C as _C
    -import torch.utils.hooks as hooks
    -from collections import OrderedDict
    -
    -
    -
    [docs]class Function(_C._FunctionBase): - """Records operation history and defines formulas for differentiating ops. - - Every operation performed on :class:`Variable` s creates a new function - object, that performs the computation, and records that it happened. - The history is retained in the form of a DAG of functions, with edges - denoting data dependencies (``input <- output``). Then, when backward is - called, the graph is processed in the topological ordering, by calling - :func:`backward` methods of each :class:`Function` object, and passing - returned gradients on to next :class:`Function` s. - - Normally, the only way users interact with functions is by creating - subclasses and defining new operations. This is a recommended way of - extending torch.autograd. - - Since Function logic is a hotspot in most scripts, almost all of it - was moved to our C backend, to ensure that the framework overhead is - minimal. - - Each function is meant to be used only once (in the forward pass). - - Attributes: - saved_tensors: Tuple of Tensors that were saved in the call to - :func:`forward`. - needs_input_grad: Tuple of booleans of length :attr:`num_inputs`, - indicating whether a given input requires gradient. This can be - used to optimize buffers saved for backward, and ignoring gradient - computation in :func:`~Function.backward`. - num_inputs: Number of inputs given to :func:`forward`. - num_outputs: Number of tensors returned by :func:`forward`. - requires_grad: Boolean indicating whether the :func:`backward` will - ever need to be called. - previous_functions: Tuple of (int, Function) pairs of length - :attr:`num_inputs`. Each entry contains a reference to a - :class:`Function` that created corresponding input, and an index - of the previous function output that's been used. - """ - __call__ = _C._FunctionBase._do_forward - -
    [docs] def save_for_backward(self, *tensors): - """Saves given tensors for a future call to :func:`~Function.backward`. - - **This should be called at most once, and only from inside the** - :func:`forward` **method.** - - Later, saved tensors can be accessed through the :attr:`saved_tensors` - attribute. Before returning them to the user, a check is made, to - ensure they weren't used in any in-place operation that modified - their content. - - Arguments can also be ``None``. - """ - self.to_save = tensors
    - -
    [docs] def mark_dirty(self, *args): - """Marks given tensors as modified in an in-place operation. - - **This should be called at most once, only from inside the** - :func:`forward` **method, and all arguments should be inputs.** - - Every tensor that's been modified in-place in a call to :func:`forward` - should be given to this function, to ensure correcness of our checks. - It doesn't matter wheter the function is called before or after - modification. - """ - self.dirty_tensors = args
    - -
    [docs] def mark_shared_storage(self, *pairs): - """Marks that given pairs of distinct tensors are sharing storage. - - **This should be called at most once, only from inside the** - :func:`forward` **method, and all arguments should be pairs of - (input, output).** - - If some of the outputs are going to be tensors sharing storage with - some of the inputs, all pairs of (input_arg, output_arg) should be - given to this function, to ensure correctness checking of in-place - modification. The only exception is when an output is exactly the same - tensor as input (e.g. in-place ops). In such case it's easy to conclude - that they're sharing data, so we don't require specifying such - dependencies. - - This function is not needed in most functions. It's primarily used in - indexing and transpose ops. - """ - self.shared_pairs = pairs
    - -
    [docs] def mark_non_differentiable(self, *args): - """Marks outputs as non-differentiable. - - **This should be called at most once, only from inside the** - :func:`forward` **method, and all arguments should be outputs.** - - This will mark outputs as not requiring gradients, increasing the - efficiency of backward computation. You still need to accept a gradient - for each output in :meth:`~Function.backward`, but it's always going to - be ``None``. - - This is used e.g. for indices returned from a max :class:`Function`. - """ - self.non_differentiable = args
    - - @staticmethod - def _register_hook(backward_hooks, hook): - if backward_hooks is None: - backward_hooks = OrderedDict() - handle = hooks.RemovableHandle(backward_hooks) - backward_hooks[handle.id] = hook - return backward_hooks, handle - -
    [docs] def forward(self, *input): - """Performs the operation. - - This function is to be overriden by all subclasses. - - It can take and return an arbitrary number of tensors. - """ - raise NotImplementedError
    - -
    [docs] def backward(self, *grad_output): - """Defines a formula for differentiating the operation. - - This function is to be overriden by all subclasses. - - All arguments are tensors. It has to accept exactly as many arguments, - as many outputs did :func:`forward` return, and it should return as - many tensors, as there were inputs to :func:`forward`. Each argument - is the gradient w.r.t the given output, and each returned value should - be the gradient w.r.t. the corresponding input. - """ - raise NotImplementedError
    - - -class InplaceFunction(Function): - - def __init__(self, inplace=False): - super(InplaceFunction, self).__init__() - self.inplace = inplace - - -def _nested_map(condition, fn): - def _map(obj): - if condition(obj): - return fn(obj) - elif obj is None: - return None - elif isinstance(obj, (list, tuple)): - return type(obj)(_map(x) for x in obj) - else: - raise ValueError("NestedIOFunction doesn't know how to process " - "an input object of type " + torch.typename(obj)) - return _map - - -def _iter_filter(condition): - def _iter(obj): - if condition(obj): - yield obj - elif obj is None: - return - elif isinstance(obj, (list, tuple)): - for o in obj: - for var in _iter(o): - yield var - else: - raise ValueError("NestedIOFunction doesn't know how to process " - "an input object of type " + torch.typename(obj)) - return _iter - - -def _unflatten(input, proto): - # unflatten a list or tuple input into a nested list/tuple structure - # specified by proto - def unflatten_helper(input, proto): - res = [] - if not isinstance(proto, (list, tuple)): - return input[0], input[1:] - for e in proto: - res_e, input = unflatten_helper(input, e) - res.append(res_e) - return type(proto)(res), input - - return unflatten_helper(input, proto)[0] - -_iter_variables = _iter_filter(lambda o: isinstance(o, torch.autograd.Variable)) -_iter_tensors = _iter_filter(torch.is_tensor) -_iter_None_tensors = _iter_filter(lambda o: o is None or torch.is_tensor(o)) -_map_variable_tensor = _nested_map(lambda o: isinstance(o, torch.autograd.Variable), lambda o: o.data) - - -class NestedIOFunction(Function): - - def _do_forward(self, *input): - self._nested_input = input - flat_input = tuple(_iter_variables(input)) - flat_output = super(NestedIOFunction, self)._do_forward(*flat_input) - nested_output = self._nested_output - nested_variables = _unflatten(flat_output, self._nested_output) - return nested_variables - - def _do_backward(self, gradients, retain_variables): - self.retain_variables = 
retain_variables - result = super(NestedIOFunction, self)._do_backward(gradients, retain_variables) - if not retain_variables: - del self._nested_output - del self._to_save_nested - return result - - def backward(self, *gradients): - nested_gradients = _unflatten(gradients, self._nested_output) - result = self.backward_extended(*nested_gradients) - return tuple(_iter_None_tensors(result)) - - __call__ = _do_forward - - def forward(self, *args): - nested_tensors = _map_variable_tensor(self._nested_input) - result = self.forward_extended(*nested_tensors) - del self._nested_input - self._nested_output = result - return tuple(_iter_tensors(result)) - - def save_for_backward(self, *args): - self.to_save = tuple(_iter_tensors(args)) - self._to_save_nested = args - - @property - def saved_tensors(self): - flat_tensors = super(NestedIOFunction, self).saved_tensors - return _unflatten(flat_tensors, self._to_save_nested) - - def mark_dirty(self, *args, **kwargs): - self.dirty_tensors = tuple(_iter_tensors((args, kwargs))) - - def mark_non_differentiable(self, *args, **kwargs): - self.non_differentiable = tuple(_iter_tensors((args, kwargs))) - - def forward_extended(self, *input): - raise NotImplementedError - - def backward_extended(self, *grad_output): - raise NotImplementedError -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/autograd/variable.html b/docs/_modules/torch/autograd/variable.html deleted file mode 100644 index 0018f54ed06d..000000000000 --- a/docs/_modules/torch/autograd/variable.html +++ /dev/null @@ -1,1447 +0,0 @@ - - - - - - - - - - - torch.autograd.variable — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.autograd.variable

    -import sys
    -import torch._C as _C
    -from collections import OrderedDict
    -import torch.sparse as sparse
    -import torch.utils.hooks as hooks
    -
    -from ._functions import *
    -
    -
    -
    [docs]class Variable(_C._VariableBase): - """Wraps a tensor and records the operations applied to it. - - Variable is a thin wrapper around a Tensor object, that also holds - the gradient w.r.t. to it, and a reference to a function that created it. - This reference allows retracing the whole chain of operations that - created the data. If the Variable has been created by the user, its creator - will be ``None`` and we call such objects *leaf* Variables. - - Since autograd only supports scalar valued function differentiation, grad - size always matches the data size. Also, grad is normally only allocated - for leaf variables, and will be always zero otherwise. - - Attributes: - data: Wrapped tensor of any type. - grad: Variable holding the gradient of type and location matching - the ``.data``. This attribute is lazily allocated and can't - be reassigned. - requires_grad: Boolean indicating whether the Variable has been - created by a subgraph containing any Variable, that requires it. - See :ref:`excluding-subgraphs` for more details. - Can be changed only on leaf Variables. - volatile: Boolean indicating that the Variable should be used in - inference mode, i.e. don't save the history. See - :ref:`excluding-subgraphs` for more details. - Can be changed only on leaf Variables. - creator: Function of which the variable was an output. For leaf - (user created) variables it's ``None``. Read-only attribute. - - Parameters: - data (any tensor class): Tensor to wrap. - requires_grad (bool): Value of the requires_grad flag. **Keyword only.** - volatile (bool): Value of the volatile flag. 
**Keyword only.** - """ - - _fallthrough_methods = { - 'size', - 'stride', - 'nelement', - 'ndimension', - 'element_size', - 'is_contiguous', - 'is_set_to', - 'is_signed', - 'numel', - 'dim', - 'get_device', - 'is_cuda', - } - - def __getattr__(self, name): - if name in self._fallthrough_methods: - return getattr(self.data, name) - raise AttributeError(name) - - def __getitem__(self, key): - if (isinstance(key, Variable) and - type(key.data).__name__ == 'ByteTensor'): - return MaskedSelect()(self, key) - return Index(key)(self) - - def __setitem__(self, key, value): - if (isinstance(key, Variable) and - type(key.data).__name__ == 'ByteTensor'): - if isinstance(value, Variable): - return MaskedCopy(inplace=True)(self, key, value) - else: - return MaskedFill(value, inplace=True)(self, key) - else: - if isinstance(value, Variable): - return SetItem(key)(self, value) - else: - return SetItem(key, value)(self) - - def __deepcopy__(self, memo): - if self.creator is not None: - raise RuntimeError("Only Variables created explicitly by the user " - "(graph leaves) support the deepcopy protocol at the moment") - result = type(self)(self.data.clone()) - result.requires_grad = self.requires_grad - result.volatile = self.volatile - memo[id(self)] = result - return result - - def __reduce_ex__(self, proto): - state = (self.requires_grad, self.volatile, self._backward_hooks) - if proto > 1: - return type(self), (self.data,), state - if sys.version_info[0] == 2: - from copy_reg import __newobj__ - else: - from copyreg import __newobj__ - return __newobj__, (type(self), self.data), state - - def __setstate__(self, state): - if len(state) == 5: - # legacy serialization of Variable - self.data = state[0] - state = (state[3], state[4], state[2]) - if self.creator is not None: - raise RuntimeError('__setstate__ can be only called on leaf variables') - self.requires_grad, self.volatile, self._backward_hooks = state - - def __repr__(self): - return 'Variable containing:' + 
self.data.__repr__() - -
    [docs] def backward(self, gradient=None, retain_variables=False): - """Computes the gradient of current variable w.r.t. graph leaves. - - The graph is differentiated using the chain rule. If the variable is - non-scalar (i.e. its data has more than one element) and requires - gradient, the function additionaly requires specifying ``gradient``. - It should be a tensor of matching type and location, that containins - the gradient of the differentiated function w.r.t. ``self``. - - This function accumulates gradients in the leaves - you might need to zero - them before calling it. - - Arguments: - gradient (Tensor): Gradient of the differentiated function - w.r.t. the data. Required only if the data has more than one - element. Type and location should match these of ``self.data``. - retain_variables (bool): If ``True``, buffers necessary for computing - gradients won't be freed after use. It is only necessary to - specify ``True`` if you want to differentiate some subgraph multiple - times (in some cases it will be much more efficient to use - `autograd.backward`). - """ - if self.volatile: - raise RuntimeError('calling backward on a volatile variable') - if gradient is None and self.requires_grad: - if self.data.numel() != 1: - raise RuntimeError( - 'backward should be called only on a scalar (i.e. 1-element tensor) ' - 'or with gradient w.r.t. the variable') - gradient = self.data.new().resize_as_(self.data).fill_(1) - self._execution_engine.run_backward((self,), (gradient,), retain_variables)
    - -
    [docs] def register_hook(self, hook): - """Registers a backward hook. - - The hook will be called every time a gradient with respect to the - variable is computed. The hook should have the following signature:: - - hook(grad) -> Variable or None - - The hook should not modify its argument, but it can optionally return - a new gradient which will be used in place of :attr:`grad`. - - This function returns a handle with a method ``handle.remove()`` - that removes the hook from the module. - - Example: - >>> v = Variable(torch.Tensor([0, 0, 0]), requires_grad=True) - >>> h = v.register_hook(lambda grad: grad * 2) # double the gradient - >>> v.backward(torch.Tensor([1, 1, 1])) - >>> v.grad.data - 2 - 2 - 2 - [torch.FloatTensor of size 3] - >>> h.remove() # removes the hook - """ - if self.volatile: - raise RuntimeError("cannot register a hook on a volatile variable") - if not self.requires_grad: - raise RuntimeError("cannot register a hook on a variable that " - "doesn't require gradient") - if self._backward_hooks is None: - self._backward_hooks = OrderedDict() - if self.creator is not None: - self.creator._register_hook_dict(self) - handle = hooks.RemovableHandle(self._backward_hooks) - self._backward_hooks[handle.id] = hook - return handle
    - -
    [docs] def reinforce(self, reward): - """Registers a reward obtained as a result of a stochastic process. - - Differentiating stochastic nodes requires providing them with reward - value. If your graph contains any stochastic operations, you should - call this function on their outputs. Otherwise an error will be raised. - - Parameters: - reward(Tensor): Tensor with per-element rewards. It has to match - the device location and shape of Variable's data. - """ - if not isinstance(self.creator, StochasticFunction): - raise RuntimeError("reinforce() can be only called on outputs " - "of stochastic functions") - self.creator._reinforce(reward)
    - -
    [docs] def detach(self): - """Returns a new Variable, detached from the current graph. - - Result will never require gradient. If the input is volatile, the output - will be volatile too. - - .. note:: - - Returned Variable uses the same data tensor, as the original one, and - in-place modifications on either of them will be seen, and may trigger - errors in correctness checks. - """ - result = NoGrad()(self) # this is needed, because it merges version counters - result._creator = None - return result
    - -
    [docs] def detach_(self): - """Detaches the Variable from the graph that created it, making it a leaf.""" - self._creator = None - self.requires_grad = False
    - - def contiguous(self): - self.data = self.data.contiguous() - return self - - def clone(self): - return Clone()(self) - - def type(self, t): - if t != type(self.data): - return Type(t)(self) - return self - - def _get_type(self, name): - module = torch._import_dotted_name(self.data.__module__) - return getattr(module, name) - - def cuda(self, device_id=None, async=False): - return CudaTransfer(device_id, async)(self) - - def cpu(self): - return self.type(getattr(torch, type(self.data).__name__)) - - def double(self): - return self.type(self._get_type('DoubleTensor')) - - def float(self): - return self.type(self._get_type('FloatTensor')) - - def half(self): - return self.type(self._get_type('HalfTensor')) - - def long(self): - return self.type(self._get_type('LongTensor')) - - def int(self): - return self.type(self._get_type('IntTensor')) - - def short(self): - return self.type(self._get_type('ShortTensor')) - - def char(self): - return self.type(self._get_type('CharTensor')) - - def byte(self): - return self.type(self._get_type('ByteTensor')) - - def is_same_size(self, other_var): - return self.data.is_same_size(other_var.data) - - def _add(self, other, inplace): - if isinstance(other, Variable): - return Add(inplace)(self, other) - else: - assert not torch.is_tensor(other) - return AddConstant(other, inplace)(self) - - def add(self, other): - return self._add(other, False) - - def add_(self, other): - return self._add(other, True) - - def _sub(self, other, inplace): - if isinstance(other, Variable): - return Sub(inplace=inplace)(self, other) - else: - assert not torch.is_tensor(other) - return SubConstant(other, inplace=inplace)(self) - - def sub(self, other): - return self._sub(other, False) - - def sub_(self, other): - return self._sub(other, True) - - def mul(self, other): - if isinstance(other, Variable): - return Mul()(self, other) - else: - assert not torch.is_tensor(other) - return MulConstant(other)(self) - - def mul_(self, other): - if not 
isinstance(other, Variable) and not torch.is_tensor(other): - return MulConstant(other, inplace=True)(self) - raise RuntimeError("mul_ only supports scalar multiplication") - - def div(self, other): - if isinstance(other, Variable): - return Div()(self, other) - else: - assert not torch.is_tensor(other) - return DivConstant(other)(self) - - def div_(self, other): - if not isinstance(other, Variable) and not torch.is_tensor(other): - return DivConstant(other, inplace=True)(self) - raise RuntimeError("div_ only supports scalar multiplication") - - def pow(self, other): - if isinstance(other, Variable): - return Pow()(self, other) - else: - assert not torch.is_tensor(other) - return PowConstant(other)(self) - - def exp(self): - return Exp()(self) - - def exp_(self): - return Exp(inplace=True)(self) - - def log(self): - return Log()(self) - - def log1p(self): - return Log1p()(self) - - def neg(self): - return Negate()(self) - - def neg_(self): - return Negate(inplace=True)(self) - - def tanh(self): - return Tanh()(self) - - def tanh_(self): - return Tanh(True)(self) - - def sigmoid(self): - return Sigmoid()(self) - - def sigmoid_(self): - return Sigmoid(True)(self) - - def sin(self): - return Sin()(self) - - def cos(self): - return Cos()(self) - - def tan(self): - return Tan()(self) - - def asin(self): - return Asin()(self) - - def acos(self): - return Acos()(self) - - def atan(self): - return Atan()(self) - - def sinh(self): - return Sinh()(self) - - def cosh(self): - return Cosh()(self) - - def abs(self): - return Abs()(self) - - def clamp(self, min=None, max=None): - if min is None and max is None: - raise ValueError("clamp requires specifying at least one of " - "min and max arguments") - elif min is None and max is not None: - return CminConstant(max)(self) - elif min is not None and max is None: - return CmaxConstant(min)(self) - else: - return Clamp(min, max)(self) - - def reciprocal(self): - return Reciprocal()(self) - - def floor(self): - return Floor()(self) 
- - def ceil(self): - return Ceil()(self) - - def frac(self): - return Frac()(self) - - def sqrt(self): - return Sqrt()(self) - - def round(self): - return Round()(self) - - def sign(self): - return Sign()(self) - - def trunc(self): - return Trunc()(self) - - def fmod(self, value): - return Fmod(value)(self) - - def remainder(self, value): - return Remainder(value)(self) - - def lerp(self, tensor, weight): - return Lerp(weight)(self, tensor) - - def rsqrt(self): - return Rsqrt()(self) - - def sum(self, dim=None): - return Sum(dim)(self) - - def prod(self, dim=None): - return Prod(dim)(self) - - def mean(self, dim=None): - return Mean(dim)(self) - - def max(self, dim=None): - if isinstance(dim, Variable): - return Cmax()(self, dim) - return Max(dim)(self) - - def min(self, dim=None): - if isinstance(dim, Variable): - return Cmin()(self, dim) - return Min(dim)(self) - - def mode(self, dim): - return Mode(dim)(self) - - def median(self, dim): - return Median(dim)(self) - - def kthvalue(self, dim): - return Kthvalue(dim)(self) - - def sort(self, dim=None, descending=False): - return Sort(dim, descending)(self) - - def topk(self, k, dim=None, largest=True, sorted=True): - return Topk(k, dim, largest, sorted)(self) - - def view(self, *sizes): - return View(*sizes)(self) - - def view_as(self, tensor): - return View(*tensor.size())(self) - - def split(self, split_size, dim=0): - return torch.split(self, split_size, dim) - - def repeat(self, *repeats): - if len(repeats) == 1 and isinstance(repeats[0], torch.Size): - repeats = repeats[0] - else: - repeats = torch.Size(repeats) - return Repeat(repeats)(self) - - def var(self, dim=None, unbiased=True): - mean = self.mean(dim) - if dim is None: - mean = mean.view(*(1 for s in self.size())) - mean_expanded = mean.expand_as(self) - zero_centered = self.sub(mean_expanded) - var = zero_centered.mul(zero_centered).sum(dim) - numel = self.numel() if dim is None else self.size(dim) - return var.div(numel - int(unbiased)) - - def 
std(self, dim=None, unbiased=True): - return self.var(dim, unbiased).sqrt() - - def renorm(self, norm_type, dim, maxnorm): - t = self.transpose(dim, 0) - flat = t.contiguous().view(self.size(0), -1) - norms = flat.norm(norm_type, 1) - norms = norms.clamp(max=maxnorm).div(norms.add(1e-7)) - flat_out = flat.mul(norms.expand_as(flat)) - return flat_out.view(t.size()).transpose(dim, 0) - - @staticmethod - def _static_blas(cls, args, inplace): - num_args = len(args) - alpha = beta = 1 - if num_args > 5: - raise RuntimeError("too many args") - if num_args == 5: - alpha, beta = args[1:3] - if num_args == 4: - alpha = args[1] - return cls(alpha, beta, inplace)(*(args[:1] + args[-2:])) - - def _blas(self, cls, args, inplace): - return self._static_blas(cls, (self,) + args, inplace) - - def mm(self, matrix): - output = Variable(self.data.new(self.data.size(0), matrix.data.size(1))) - return self._static_blas(Addmm, (output, 0, 1, self, matrix), False) - - def bmm(self, batch): - output = Variable(self.data.new(self.data.size(0), self.data.size(1), - batch.data.size(2))) - return self._static_blas(Baddbmm, (output, 0, 1, self, batch), False) - - def mv(self, vector): - output = Variable(self.data.new(self.data.size(0))) - return self._static_blas(Addmv, (output, 0, 1, self, vector), False) - - def ger(self, vector): - output = Variable(self.data.new(self.data.size(0), vector.data.size(0))) - return self._static_blas(Addr, (output, 0, 1, self, vector), False) - - def resize(self, *sizes): - return Resize(*sizes)(self) - - def resize_as(self, variable): - return Resize(*variable.size())(self) - - def addmm(self, *args): - return self._blas(Addmm, args, False) - - def addmm_(self, *args): - return self._blas(Addmm, args, True) - - def addbmm(self, *args): - return self._blas(Addbmm, args, False) - - def addbmm_(self, *args): - return self._blas(Addbmm, args, True) - - def baddbmm(self, *args): - return self._blas(Baddbmm, args, False) - - def baddbmm_(self, *args): - return 
self._blas(Baddbmm, args, True) - - def addmv(self, *args): - return self._blas(Addmv, args, False) - - def addmv_(self, *args): - return self._blas(Addmv, args, True) - - def addr(self, *args): - return self._blas(Addr, args, False) - - def addr_(self, *args): - return self._blas(Addr, args, True) - - def dot(self, other): - return Dot()(self, other) - - def _addcop(self, op, args): - if len(args) == 3: - # scale, tensor1, tensor2 - return op(args[0])(self, *args[1:]) - else: - # tensor1, tensor2 - return op()(self, *args) - - def addcmul(self, *args): - return self._addcop(Addcmul, args) - - def addcdiv(self, *args): - return self._addcop(Addcdiv, args) - - def norm(self, norm_type=2, dim=None): - return Norm(norm_type, dim)(self) - - def dist(self, tensor, norm_type=2): - return Norm(norm_type)(self - tensor) - - def index_add(self, dim, index, tensor): - return IndexAdd(dim)(self, index, tensor) - - def index_add_(self, dim, index, tensor): - return IndexAdd(dim, True)(self, index, tensor) - - def index_copy(self, dim, index, tensor): - return IndexCopy(dim)(self, index, tensor) - - def index_copy_(self, dim, index, tensor): - return IndexCopy(dim, True)(self, index, tensor) - - def index_fill(self, dim, index, value): - return IndexFill(dim, value)(self, index) - - def index_fill_(self, dim, index, value): - return IndexFill(dim, value, True)(self, index) - - def index_select(self, dim, index): - return IndexSelect(dim)(self, index) - - def gather(self, dim, index): - return Gather(dim)(self, index) - - def scatter(self, dim, index, source): - return Scatter(dim)(self, index, source) - - def scatter_(self, dim, index, source): - return Scatter(dim, True)(self, index, source) - - def masked_copy(self, mask, variable): - return MaskedCopy()(self, mask, variable) - - def masked_copy_(self, mask, variable): - return MaskedCopy(True)(self, mask, variable) - - def masked_fill(self, mask, value): - return MaskedFill(value)(self, mask) - - def masked_fill_(self, mask, 
value): - return MaskedFill(value, True)(self, mask) - - def masked_select(self, mask): - return MaskedSelect()(self, mask) - - def expand(self, *sizes): - if isinstance(sizes[0], torch.Size): - if len(sizes) > 1: - raise ValueError("expand expects a several ints or a single " - "torch.Size argument") - sizes = sizes[0] - return Expand(sizes)(self) - - def expand_as(self, tensor): - return Expand(tensor.size())(self) - - def t(self): - return Transpose(0, 1)(self) - - def transpose(self, dim1, dim2): - return Transpose(dim1, dim2)(self) - - def select(self, dim, _index): - index = tuple(slice(None, None) for _ in range(dim)) + (_index,) - return Index(index)(self) - - def narrow(self, dim, start_index, length): - index = tuple(slice(None, None) for _ in range(dim)) + \ - (slice(start_index, start_index + length),) - - return Index(index)(self) - - def chunk(self, num_chunks, dim=0): - return Chunk(num_chunks, dim)(self) - - def squeeze(self, dim=None): - return Squeeze(dim)(self) - - def unsqueeze(self, dim): - return Unsqueeze(dim)(self) - - def permute(self, *permutation): - return Permute(permutation)(self) - - def diag(self, diagonal_idx=0): - return Diag(diagonal_idx)(self) - - def tril(self, diagonal_idx=0): - return Tril(diagonal_idx)(self) - - def triu(self, diagonal_idx=0): - return Triu(diagonal_idx)(self) - - def trace(self): - return Trace()(self) - - def multinomial(self, num_samples=1, with_replacement=False): - return Multinomial(num_samples, with_replacement)(self) - - def bernoulli(self): - return Bernoulli()(self) - - def eq(self, other): - if isinstance(other, Variable): - return Eq()(self, other) - assert not torch.is_tensor(other), "can't compare Variable and tensor" - return Eq(other)(self) - - def ne(self, other): - if isinstance(other, Variable): - return Ne()(self, other) - assert not torch.is_tensor(other), "can't compare Variable and tensor" - return Ne(other)(self) - - def gt(self, other): - if isinstance(other, Variable): - return 
Gt()(self, other) - assert not torch.is_tensor(other), "can't compare Variable and tensor" - return Gt(other)(self) - - def ge(self, other): - if isinstance(other, Variable): - return Ge()(self, other) - assert not torch.is_tensor(other), "can't compare Variable and tensor" - return Ge(other)(self) - - def lt(self, other): - if isinstance(other, Variable): - return Lt()(self, other) - assert not torch.is_tensor(other), "can't compare Variable and tensor" - return Lt(other)(self) - - def le(self, other): - if isinstance(other, Variable): - return Le()(self, other) - assert not torch.is_tensor(other), "can't compare Variable and tensor" - return Le(other)(self) - - def __add__(self, other): - return self.add(other) - __radd__ = __add__ - - def __iadd__(self, other): - return self.add_(other) - - def __sub__(self, other): - return self.sub(other) - - def __isub__(self, other): - return self.sub_(other) - - def __rsub__(self, other): - return SubConstant(other, sub_tensor=True)(self) - - def __mul__(self, other): - return self.mul(other) - __rmul__ = __mul__ - - def __imul__(self, other): - return self.mul_(other) - - def __matmul__(self, other): - dim_self = self.dim() - try: - dim_other = other.dim() - except AttributeError: # not a Variable - return NotImplemented - if dim_self == 1 and dim_other == 1: - return self.dot(other) - if dim_self == 2 and dim_other == 1: - return self.mv(other) - if dim_self == 1 and dim_other == 2: - return self.unsqueeze(0).mm(other).squeeze(0) - elif dim_self == 2 and dim_other == 2: - return self.mm(other) - raise ValueError("both arguments to __matmul__ need to be 1D or 2D, " - "but they are {}D and {}D".format(dim_self, dim_other)) - - def __div__(self, other): - return self.div(other) - __truediv__ = __div__ - - def __rdiv__(self, other): - return DivConstant(other, div_by_tensor=True)(self) - __rtruediv__ = __rdiv__ - - def __idiv__(self, other): - return self.div_(other) - - def __pow__(self, other): - return self.pow(other) - - 
def __ipow__(self, other): - raise NotImplementedError("in-place pow not implemented") - - def __rpow__(self, other): - return PowConstant(other, tensor_power=True)(self) - - def __neg__(self): - return Negate()(self) - - def __len__(self): - return len(self.data) - - def __iter__(self): - return iter(map(lambda i: self[i], range(self.size(0)))) - - def __mod__(self, other): - return self.remainder(other) - - def __eq__(self, other): - return self.eq(other) - - def __ne__(self, other): - return self.ne(other) - - def __lt__(self, other): - return self.lt(other) - - def __le__(self, other): - return self.le(other) - - def __gt__(self, other): - return self.gt(other) - - def __ge__(self, other): - return self.ge(other) - - def __hash__(self): - return id(self) - - class _torch(object): - - @staticmethod - def cat(iterable, dim=0): - return Concat(dim)(*iterable) - - @staticmethod - def normal(means, std=1): - if isinstance(std, Variable): - return Normal()(means, std) - else: - return Normal(std)(means) - - @staticmethod - def _blas(cls, args, inplace): - num_args = len(args) - alpha = beta = 1 - if num_args > 5: - raise RuntimeError("too many args") - if num_args == 5: - alpha, beta = args[0], args[2] - tensors = args[1:2] + args[3:] - elif num_args == 4: - alpha = args[0] - tensors = args[1:] - else: - tensors = args - return cls(alpha, beta, inplace)(*tensors) - - @classmethod - def addmm(cls, *args): - return cls._blas(Addmm, args, False) - - @classmethod - def addbmm(cls, *args): - return cls._blas(Addbmm, args, False) - - @classmethod - def baddbmm(cls, *args): - return cls._blas(Baddbmm, args, False) - - @classmethod - def addmv(cls, *args): - return cls._blas(Addmv, args, False) - - @classmethod - def addr(cls, *args): - return cls._blas(Addr, args, False)
    - - -for method in dir(Variable): - # This will also wrap some methods that normally aren't part of the - # funcitonal interface, but we don't care, as they won't ever be used - if method.startswith('_') or method.endswith('_'): - continue - if hasattr(Variable._torch, method): - continue - as_static = staticmethod(getattr(Variable, method)) - setattr(Variable._torch, method, as_static) - - -from .engine import ImperativeEngine -Variable._execution_engine = ImperativeEngine() -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/cuda.html b/docs/_modules/torch/cuda.html deleted file mode 100644 index 153c45a6fb3a..000000000000 --- a/docs/_modules/torch/cuda.html +++ /dev/null @@ -1,972 +0,0 @@ - - - - - - - - - - - torch.cuda — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.cuda

    -"""
    -This package adds support for CUDA tensor types, that implement the same
    -function as CPU tensors, but they utilize GPUs for computation.
    -
    -It is lazily initialized, so you can always import it, and use
    -:func:`is_available()` to determine if your system supports CUDA.
    -
    -:ref:`cuda-semantics` has more details about working with CUDA.
    -"""
    -
    -import contextlib
    -import platform
    -import ctypes
    -import os
    -import torch
    -from multiprocessing.util import register_after_fork as _register_after_fork
    -
    -_initialized = False
    -_in_bad_fork = False
    -_original_pid = False
    -_cudart = None
    -
    -
    -
    [docs]def is_available(): - """Returns a bool indicating if CUDA is currently available.""" - if (not hasattr(torch._C, '_cuda_isDriverSufficient') or - not torch._C._cuda_isDriverSufficient()): - return False - try: - return torch._C._cuda_getDeviceCount() > 0 - except RuntimeError as e: - if 'no CUDA-capable device is detected' in e.args[0]: - return False - raise
    - - -def _sleep(cycles): - torch._C._cuda_sleep(cycles) - - -def _load_cudart(): - # First check the main program for CUDA symbols - lib = ctypes.cdll.LoadLibrary(None) - if hasattr(lib, 'cudaGetErrorName'): - return lib - - raise RuntimeError( - "couldn't find libcudart. Make sure CUDA libraries are installed in a" - "default location, or that they're in {}." - .format('DYLD_LIBRARY_PATH' if platform.system() == 'Darwin' else - 'LD_LIBRARY_PATH')) - - -def _check_driver(): - if not hasattr(torch._C, '_cuda_isDriverSufficient'): - raise AssertionError("Torch not compiled with CUDA enabled") - if not torch._C._cuda_isDriverSufficient(): - if torch._C._cuda_getDriverVersion() == 0: - # found no NVIDIA driver on the system - raise AssertionError(""" -Found no NVIDIA driver on your system. Please check that you -have an NVIDIA GPU and installed a driver from -http://www.nvidia.com/Download/index.aspx""") - else: - # TODO: directly link to the alternative bin that needs install - raise AssertionError(""" -The NVIDIA driver on your system is too old (found version {}). -Please update your GPU driver by downloading and installing a new -version from the URL: http://www.nvidia.com/Download/index.aspx -Alternatively, go to: https://pytorch.org/binaries to install -a PyTorch version that has been compiled with your version -of the CUDA driver.""".format(str(torch._C._cuda_getDriverVersion()))) - - -def _lazy_init(): - global _initialized, _cudart, _original_pid - if _initialized: - return - if _in_bad_fork: - from sys import version_info - if version_info < (3, 4): - msg = ("To use CUDA with multiprocessing, you must use Python " - "3.4+ and the 'spawn' start method") - else: - msg = ("To use CUDA with multiprocessing, you must use the " - "'spawn' start method") - raise RuntimeError( - "Cannot re-initialize CUDA in forked subprocess. 
" + msg) - _check_driver() - assert torch._C._cuda_init() - assert torch._C._cuda_sparse_init() - _cudart = _load_cudart() - _cudart.cudaGetErrorName.restype = ctypes.c_char_p - _cudart.cudaGetErrorString.restype = ctypes.c_char_p - _original_pid = os.getpid() - _initialized = True - - -def _after_fork(arg): - global _initialized, _in_bad_fork - if _initialized and _original_pid != os.getpid(): - _initialized = False - _in_bad_fork = True - - -_register_after_fork(_after_fork, _after_fork) - - -def cudart(): - _lazy_init() - return _cudart - - -
    [docs]class device(object): - """Context-manager that changes the selected device. - - Arguments: - idx (int): device index to select. It's a no-op if this argument - is negative. - """ - - def __init__(self, idx): - self.idx = idx - self.prev_idx = -1 - - def __enter__(self): - if self.idx is -1: - return - _lazy_init() - self.prev_idx = torch._C._cuda_getDevice() - if self.prev_idx != self.idx: - torch._C._cuda_setDevice(self.idx) - - def __exit__(self, *args): - if self.prev_idx != self.idx: - torch._C._cuda_setDevice(self.prev_idx) - return False
    - - -
    [docs]class device_of(device): - """Context-manager that changes the current device to that of given object. - - You can use both tensors and storages as arguments. If a given object is - not allocated on a GPU, this is a no-op. - - Arguments: - obj (Tensor or Storage): object allocated on the selected device. - """ - - def __init__(self, obj): - idx = obj.get_device() if obj.is_cuda else -1 - super(device_of, self).__init__(idx)
    - - -
    [docs]def set_device(device): - """Sets the current device. - - Usage of this function is discouraged in favor of :any:`device`. In most - cases it's better to use ``CUDA_VISIBLE_DEVICES`` environmental variable. - - Arguments: - device (int): selected device. This function is a no-op if this - argument is negative. - """ - if device >= 0: - torch._C._cuda_setDevice(device)
    - - -@contextlib.contextmanager -
    [docs]def stream(stream): - """Context-manager that selects a given stream. - - All CUDA kernels queued within its context will be enqueued on a selected - stream. - - Arguments: - stream (Stream): selected stream. This manager is a no-op if it's - ``None``. - """ - if stream is None: - yield - return - prev_stream = current_stream() - torch._C._cuda_setStream(stream._cdata) - try: - yield - finally: - torch._C._cuda_setStream(prev_stream._cdata)
    - - -
    [docs]def device_count(): - """Returns the number of GPUs available.""" - if is_available(): - _lazy_init() - return torch._C._cuda_getDeviceCount() - else: - return 0
    - - -
    [docs]def current_device(): - """Returns the index of a currently selected device.""" - _lazy_init() - return torch._C._cuda_getDevice()
    - - -
    [docs]def synchronize(): - """Waits for all kernels in all streams on current device to complete.""" - _lazy_init() - return torch._C._cuda_synchronize()
    - - -
    [docs]def current_stream(): - """Returns a currently selected :class:`Stream`.""" - _lazy_init() - return torch.cuda.Stream(_cdata=torch._C._cuda_getCurrentStream())
    - - -
    [docs]def current_blas_handle(): - """Returns cublasHandle_t pointer to current cuBLAS handle""" - return torch._C._cuda_getCurrentBlasHandle()
    - - -def _host_allocator(): - _lazy_init() - return torch._C._cuda_cudaHostAllocator() - - -@contextlib.contextmanager -def _free_mutex(): - torch._C._cuda_lock_mutex() - try: - yield - finally: - torch._C._cuda_unlock_mutex() - - -from .random import * - -################################################################################ -# Define Storage and Tensor classes -################################################################################ - - -from ..tensor import _TensorBase -from ..storage import _StorageBase - - -def _dummy_type(name): - def init_err(self): - class_name = self.__class__.__name__ - raise RuntimeError( - "Tried to instantiate dummy base class {}".format(class_name)) - return type(storage_name, (object,), {"__init__": init_err}) - - -if not hasattr(torch._C, 'CudaDoubleStorageBase'): - # Define dummy base classes - for t in ['Double', 'Float', 'Long', 'Int', 'Short', 'Char', 'Byte', 'Half']: - storage_name = 'Cuda{0}StorageBase'.format(t) - tensor_name = 'Cuda{0}TensorBase'.format(t) - - torch._C.__dict__[storage_name] = _dummy_type(storage_name) - torch._C.__dict__[tensor_name] = _dummy_type(tensor_name) - - torch._C.__dict__['_CudaStreamBase'] = _dummy_type('CudaStreamBase') - - -class _CudaBase(object): - is_cuda = True - is_sparse = False - - def type(self, *args, **kwargs): - with device(self.get_device()): - return super(_CudaBase, self).type(*args, **kwargs) - - def __new__(cls, *args, **kwargs): - _lazy_init() - # We need this method only for lazy init, so we can remove it - del _CudaBase.__new__ - return super(_CudaBase, cls).__new__(cls, *args, **kwargs) - - -class DoubleStorage(_CudaBase, torch._C.CudaDoubleStorageBase, _StorageBase): - pass - - -class FloatStorage(_CudaBase, torch._C.CudaFloatStorageBase, _StorageBase): - pass - - -class LongStorage(_CudaBase, torch._C.CudaLongStorageBase, _StorageBase): - pass - - -class IntStorage(_CudaBase, torch._C.CudaIntStorageBase, _StorageBase): - pass - - -class 
ShortStorage(_CudaBase, torch._C.CudaShortStorageBase, _StorageBase): - pass - - -class CharStorage(_CudaBase, torch._C.CudaCharStorageBase, _StorageBase): - pass - - -class ByteStorage(_CudaBase, torch._C.CudaByteStorageBase, _StorageBase): - pass - - -class HalfStorage(_CudaBase, torch._C.CudaHalfStorageBase, _StorageBase): - pass - - -class DoubleTensor(_CudaBase, torch._C.CudaDoubleTensorBase, _TensorBase): - - def is_signed(self): - return True - - @classmethod - def storage_type(cls): - return DoubleStorage - - -class FloatTensor(_CudaBase, torch._C.CudaFloatTensorBase, _TensorBase): - - def is_signed(self): - return True - - @classmethod - def storage_type(cls): - return FloatStorage - - -class LongTensor(_CudaBase, torch._C.CudaLongTensorBase, _TensorBase): - - def is_signed(self): - return True - - @classmethod - def storage_type(cls): - return LongStorage - - -class IntTensor(_CudaBase, torch._C.CudaIntTensorBase, _TensorBase): - - def is_signed(self): - return True - - @classmethod - def storage_type(cls): - return IntStorage - - -class ShortTensor(_CudaBase, torch._C.CudaShortTensorBase, _TensorBase): - - def is_signed(self): - return True - - @classmethod - def storage_type(cls): - return ShortStorage - - -class CharTensor(_CudaBase, torch._C.CudaCharTensorBase, _TensorBase): - - def is_signed(self): - # TODO - return False - - @classmethod - def storage_type(cls): - return CharStorage - - -class ByteTensor(_CudaBase, torch._C.CudaByteTensorBase, _TensorBase): - - def is_signed(self): - return False - - @classmethod - def storage_type(cls): - return ByteStorage - - -class HalfTensor(_CudaBase, torch._C.CudaHalfTensorBase, _TensorBase): - - def is_signed(self): - return True - - @classmethod - def storage_type(): - return HalfStorage - - -torch._storage_classes.add(DoubleStorage) -torch._storage_classes.add(FloatStorage) -torch._storage_classes.add(LongStorage) -torch._storage_classes.add(IntStorage) -torch._storage_classes.add(ShortStorage) 
-torch._storage_classes.add(CharStorage) -torch._storage_classes.add(ByteStorage) -torch._storage_classes.add(HalfStorage) - -torch._tensor_classes.add(DoubleTensor) -torch._tensor_classes.add(FloatTensor) -torch._tensor_classes.add(LongTensor) -torch._tensor_classes.add(IntTensor) -torch._tensor_classes.add(ShortTensor) -torch._tensor_classes.add(CharTensor) -torch._tensor_classes.add(ByteTensor) -torch._tensor_classes.add(HalfTensor) - -from . import sparse -from .streams import Stream, Event -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/cuda/comm.html b/docs/_modules/torch/cuda/comm.html deleted file mode 100644 index 21f1ca7dd36d..000000000000 --- a/docs/_modules/torch/cuda/comm.html +++ /dev/null @@ -1,806 +0,0 @@ - - - - - - - - - - - torch.cuda.comm — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.cuda.comm

    -import torch
    -from . import nccl
    -from torch._utils import _accumulate
    -
    -# TODO: sync streams when implemented
    -
    -
    -
    [docs]def broadcast(tensor, devices): - """Broadcasts a tensor to a number of GPUs. - - Arguments: - tensor (Tensor): tensor to broadcast. - devices (Iterable): an iterable of devices among which to broadcast. - Note that it should be like (src, dst1, dst2, ...), the first element - of which is the source device to broadcast from. - - Returns: - A tuple containing copies of the ``tensor``, placed on devices - corresponding to indices from ``devices``. - """ - if nccl.is_available([tensor]) and len(set(devices)) == len(devices): - tensors = [tensor] - for device in devices[1:]: - with torch.cuda.device(device): - tensors.append(type(tensor)(tensor.size())) - nccl.broadcast(tensors) - return tuple(tensors) - - # TODO: copy to a pinned buffer first (if copy is from CPU) - return tuple(tensor.cuda(gpu, async=True) for gpu in devices)
    - - -def broadcast_coalesced(tensors, devices, buffer_size=10485760): - """Broadcasts a sequence tensors to the specified GPUs. - - Small tensors are first coalesced into a buffer to reduce the number - of synchronizations. - - Arguments: - tensors (sequence): tensors to broadcast. - devices (Iterable): an iterable of devices among which to broadcast. - Note that it should be like (src, dst1, dst2, ...), the first element - of which is the source device to broadcast from. - buffer_size (int): maximum size of the buffer used for coalescing - - Returns: - A tuple containing copies of the ``tensor``, placed on devices - corresponding to indices from ``devices``. - """ - for tensor in tensors: - if tensor.get_device() != devices[0]: - raise RuntimeError('all tensors must be on devices[0]') - outputs = [[] for _ in devices] - # use the original tensors for the first device - outputs[0].extend(tensors) - for chunk in _take_tensors(tensors, buffer_size): - results = broadcast(_flatten_tensors(chunk), devices) - # use the broadcasted tensors for the remaining devices - for dst, res in zip(outputs[1:], results[1:]): - dst.extend(_unflatten_tensors(res, chunk)) - return tuple(outputs) - - -
    [docs]def reduce_add(inputs, destination=None): - """Sums tensors from multiple GPUs. - - All inputs should have matching shapes. - - Arguments: - inputs (Iterable[Tensor]): an iterable of tensors to add. - destination (int, optional): a device on which the output will be - placed (default: current device). - - Returns: - A tensor containing an elementwise sum of all inputs, placed on the - ``destination`` device. - """ - # TODO: try to find an input on another gpu, copy it, - # and accumulate into the copy - input_size = inputs[0].size() - for i, inp in enumerate(inputs): - assert inp.is_cuda, "reduce_add expects all inputs to be on GPUs" - if inp.size() != input_size: - got = 'x'.join(str(x) for x in inp.size()) - expected = 'x'.join(str(x) for x in input_size) - raise ValueError("input {} has invalid size: got {}, but expected " - "{}".format(i, got, expected)) - if destination is None: - destination = torch.cuda.current_device() - with torch.cuda.device(destination): - result = type(inp)(input_size).zero_() - - if nccl.is_available(inputs) and inputs[0].get_device() == destination: - outputs = [result] + [t.new(t.size()) for t in inputs[1:]] - nccl.reduce(inputs, outputs) - return result - - for inp in inputs: - input_correct_gpu = inp.cuda(result.get_device()) - result.add_(input_correct_gpu) - return result
    - - -def reduce_add_coalesced(inputs, destination=None, buffer_size=10485760): - """Sums tensors from multiple GPUs. - - Small tensors are first coalesced into a buffer to reduce the number - of synchronizations. - - Arguments: - inputs (Iterable[Tensor]): an iterable of tensors to add. - destination (int, optional): a device on which the output will be - placed (default: current device). - buffer_size (int): maximum size of the buffer used for coalescing - - Returns: - A tuple of tensors containing an elementwise sum of each group of - inputs, placed on the ``destination`` device. - """ - output = [] - itrs = [_take_tensors(tensors, buffer_size) for tensors in inputs] - for chunks in zip(*itrs): - flattened = [_flatten_tensors(chunk) for chunk in chunks] - result = reduce_add(flattened, destination) - output.extend(_unflatten_tensors(result, chunks[0])) - return tuple(output) - - -
    [docs]def scatter(tensor, devices, chunk_sizes=None, dim=0, streams=None): - """Scatters tensor across multiple GPUs. - - Arguments: - tensor (Tensor): tensor to scatter. - devices (Iterable[int]): iterable of ints, specifying among which - devices the tensor should be scattered. - chunk_sizes (Iterable[int], optional): sizes of chunks to be placed on - each device. It should match ``devices`` in length and sum to - ``tensor.size(dim)``. If not specified, the tensor will be divided - into equal chunks. - dim (int, optional): A dimension along which to chunk the tensor. - - Returns: - A tuple containing chunks of the ``tensor``, spread accross given - ``devices``. - """ - if chunk_sizes is None: - chunks = tensor.chunk(len(devices), dim) - else: - assert sum(chunk_sizes) == tensor.size(dim), "given chunk sizes " \ - "don't sum up to the tensor's size (sum(chunk_sizes) == {}, but " \ - "expected {})".format(sum(chunk_sizes), tensor.size(dim)) - assert min(chunk_sizes) > 0, "got a negative chunk_size" - chunks = [tensor.narrow(dim, start - size, size) - for start, size in zip(_accumulate(chunk_sizes), chunk_sizes)] - chunks = tuple(chunk.contiguous() for chunk in chunks) - # TODO: copy to a pinned buffer first (if copying from CPU) - if streams is None: - streams = [None] * len(devices) - outputs = [] - for device, chunk, stream in zip(devices, chunks, streams): - with torch.cuda.device(device), torch.cuda.stream(stream): - outputs.append(chunk.cuda(device, async=True)) - return tuple(outputs)
    - - -
    [docs]def gather(tensors, dim=0, destination=None): - """Gathers tensors from multiple GPUs. - - Tensor sizes in all dimension different than ``dim`` have to match. - - Arguments: - tensors (Iterable[Tensor]): iterable of tensors to gather. - dim (int): a dimension along which the tensors will be concatenated. - destination (int, optional): output device (-1 means CPU, default: - current device) - - Returns: - A tensor located on ``destination`` device, that is a result of - concatenating ``tensors`` along ``dim``. - """ - total_size = 0 - expected_size = list(tensors[0].size()) - for tensor in tensors: - assert tensor.is_cuda, "gather expects all inputs to be on GPUs" - expected_size[dim] = tensor.size(dim) - if list(tensor.size()) != expected_size: - got = 'x'.join(str(x) for x in tensor.size()) - expected = 'x'.join(str(x) for x in expected_size) - raise ValueError("gather got an input of invalid size: got {}, " - "but expected {}".format(got, expected)) - total_size += tensor.size(dim) - expected_size[dim] = total_size - expected_size = torch.Size(expected_size) - if destination is None: - destination = torch.cuda.current_device() - if destination == -1: - result = getattr(torch, type(tensors[0]).__name__)(expected_size) - else: - with torch.cuda.device(destination): - result = type(tensors[0])(expected_size) - - chunk_start = 0 - # TODO: if copying to CPU, allocate a pinned buffer, do async copies to it, - # and copy it to regular memory - for tensor in tensors: - result.narrow(dim, chunk_start, tensor.size(dim)).copy_(tensor, True) - chunk_start += tensor.size(dim) - return result
    - - -def _flatten_tensors(tensors): - """Flatten tensors into a single contiguous 1D buffer""" - if len(tensors) == 1: - return tensors[0].contiguous().view(-1) - size = sum(tensor.numel() for tensor in tensors) - offset = 0 - flat = tensors[0].new(size) - for tensor in tensors: - flat.narrow(0, offset, tensor.numel()).copy_(tensor) - offset += tensor.numel() - return flat - - -def _unflatten_tensors(flat, tensors): - """View a flat buffer using the sizes of tensors""" - outputs = [] - offset = 0 - for tensor in tensors: - outputs.append(flat.narrow(0, offset, tensor.numel()).view_as(tensor)) - offset += tensor.numel() - return tuple(outputs) - - -def _take_tensors(tensors, size_limit): - """Groups tensors into lists of up to size_limit bytes""" - buf = [] - size = 0 - for tensor in tensors: - param_size = tensor.numel() * tensor.element_size() - if size + param_size > size_limit and size > 0: - yield buf - size = 0 - buf = [] - buf.append(tensor) - size += param_size - if len(buf) > 0: - yield buf -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/cuda/streams.html b/docs/_modules/torch/cuda/streams.html deleted file mode 100644 index d8bfd8cd8c08..000000000000 --- a/docs/_modules/torch/cuda/streams.html +++ /dev/null @@ -1,761 +0,0 @@ - - - - - - - - - - - torch.cuda.streams — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.cuda.streams

    -import ctypes
    -import torch
    -from . import cudart
    -
    -
    -SUCCESS = 0
    -ERROR_NOT_READY = 34
    -
    -
    -class CudaError(RuntimeError):
    -
    -    def __init__(self, code):
    -        msg = cudart().cudaGetErrorString(code).decode('utf-8')
    -        super(CudaError, self).__init__('{0} ({1})'.format(msg, code))
    -
    -
    -def check_error(res):
    -    if res != SUCCESS:
    -        raise CudaError(res)
    -
    -
    -
    [docs]class Stream(torch._C._CudaStreamBase): - """Wrapper around a CUDA stream. - - Arguments: - device(int, optional): a device on which to allocate the Stream. - priority(int, optional): priority of the stream. Lower numbers - represent higher priorities. - """ - - def __new__(cls, device=-1, priority=0, **kwargs): - with torch.cuda.device(device): - return super(Stream, cls).__new__(cls, priority=priority, **kwargs) - -
    [docs] def wait_event(self, event): - """Makes all future work submitted to the stream wait for an event. - - Arguments: - event (Event): an event to wait for. - """ - check_error(cudart().cudaStreamWaitEvent(self, event, ctypes.c_int(0)))
    - -
    [docs] def wait_stream(self, stream): - """Synchronizes with another stream. - - All future work submitted to this stream will wait until all kernels - submitted to a given stream at the time of call complete. - - Arguments: - stream (Stream): a stream to synchronize. - """ - self.wait_event(stream.record_event())
    - -
    [docs] def record_event(self, event=None): - """Records an event. - - Arguments: - event (Event, optional): event to record. If not given, a new one - will be allocated. - - Returns: - Recorded event. - """ - if event is None: - event = Event() - check_error(cudart().cudaEventRecord(event, self)) - return event
    - -
    [docs] def query(self): - """Checks if all the work submitted has been completed. - - Returns: - A boolean indicating if all kernels in this stream are completed. - """ - res = cudart().cudaStreamQuery(self) - if res == ERROR_NOT_READY: - return False - check_error(res) - return True
    - -
    [docs] def synchronize(self): - """Wait for all the kernels in this stream to complete.""" - check_error(cudart().cudaStreamSynchronize(self))
    - - @staticmethod - def priority_range(): - least_priority = ctypes.c_int() - greatest_priority = ctypes.c_int() - check_error(cudart().cudaDeviceGetStreamPriorityRange( - ctypes.byref(least_priority), ctypes.byref(greatest_priority))) - return (least_priority.value, greatest_priority.value) - - @property - def priority(self): - priority = ctypes.c_int() - check_error(cudart().cudaStreamGetPriority(self, ctypes.byref(priority))) - return priority.value - - @property - def _as_parameter_(self): - return ctypes.c_void_p(self.cuda_stream) - - def __eq__(self, o): - if isinstance(o, Stream): - return o.device == self.device and o.cuda_stream == self.cuda_stream - return False - - def __hash__(self): - return hash((self.cuda_stream, self.device)) - - def __repr__(self): - return ('<torch.cuda.Stream device={0} cuda_stream={1:#x}>' - .format(self.device, self.cuda_stream))
    - - -class EventHandle(ctypes.Structure): - IPC_HANDLE_SIZE = 64 - _fields_ = [('reserved', ctypes.c_char * IPC_HANDLE_SIZE)] - - -
    [docs]class Event(object): - """Wrapper around CUDA event. - - Arguments: - enable_timing (bool): indicates if the event should measure time - (default: False) - blocking (bool): if true, :meth:`wait` will be blocking (default: False) - interprocess (bool): if true, the event can be shared between processes - (default: False) - """ - - DEFAULT = 0x0 - BLOCKING_SYNC = 0x1 - DISABLE_TIMING = 0x2 - INTERPROCESS = 0x4 - - def __init__(self, enable_timing=False, blocking=False, interprocess=False, - _handle=None): - flags = Event.DEFAULT - if not enable_timing: - flags |= Event.DISABLE_TIMING - if blocking: - flags |= Event.BLOCKING_SYNC - if interprocess: - flags |= Event.INTERPROCESS - - ptr = ctypes.c_void_p() - self._cudart = cudart() - if _handle: - check_error(self._cudart.cudaIpcOpenEventHandle(ctypes.byref(ptr), _handle)) - else: - check_error(self._cudart.cudaEventCreateWithFlags(ctypes.byref(ptr), ctypes.c_uint(flags))) - self._as_parameter_ = ptr - - def __del__(self): - if hasattr(self, '_as_parameter_'): - check_error(self._cudart.cudaEventDestroy(self._as_parameter_)) - del self._as_parameter_ - -
    [docs] def record(self, stream=None): - """Records the event in a given stream.""" - if stream is None: - stream = torch.cuda.current_stream() - stream.record_event(self)
    - -
    [docs] def wait(self, stream=None): - """Makes a given stream wait for the event.""" - if stream is None: - stream = torch.cuda.current_stream() - stream.wait_event(self)
    - -
    [docs] def query(self): - """Checks if the event has been recorded. - - Returns: - A boolean indicating if the event has been recorded. - """ - res = cudart().cudaEventQuery(self) - if res == ERROR_NOT_READY: - return False - check_error(res) - return True
    - -
    [docs] def elapsed_time(self, end_event): - """Returns the time elapsed before the event was recorded.""" - time_ms = ctypes.c_float() - check_error(cudart().cudaEventElapsedTime( - ctypes.byref(time_ms), self, end_event)) - return time_ms.value
    - -
    [docs] def synchronize(self): - """Synchronizes with the event.""" - check_error(cudart().cudaEventSynchronize(self))
    - -
    [docs] def ipc_handle(self): - """Returns an IPC handle of this event.""" - handle = EventHandle() - check_error(cudart().cudaIpcGetEventHandle(ctypes.byref(handle), self)) - return handle
    - - def __repr__(self): - return '<torch.cuda.Event {0:#x}>'.format(self._as_parameter_.value)
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/functional.html b/docs/_modules/torch/functional.html deleted file mode 100644 index 25354d51cba0..000000000000 --- a/docs/_modules/torch/functional.html +++ /dev/null @@ -1,625 +0,0 @@ - - - - - - - - - - - torch.functional — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.functional

    -import torch
    -from ._utils import _range
    -
    -
    -
    [docs]def split(tensor, split_size, dim=0): - """Splits the tensor into equally sized chunks (if possible). - - Last chunk will be smaller if the tensor size along a given dimension - is not divisible by ``split_size``. - - Arguments: - tensor (Tensor): tensor to split. - split_size (int): size of a single chunk. - dim (int): dimension along which to split the tensor. - """ - if dim < 0: - dim += tensor.dim() - dim_size = tensor.size(dim) - num_splits = (dim_size + split_size - 1) // split_size - last_split_size = split_size - (split_size * num_splits - dim_size) - - def get_split_size(i): - return split_size if i < num_splits - 1 else last_split_size - return tuple(tensor.narrow(int(dim), int(i * split_size), int(get_split_size(i))) for i - in _range(0, num_splits))
    - - -
    [docs]def chunk(tensor, chunks, dim=0): - """Splits a tensor into a number of chunks along a given dimension. - - Arguments: - tensor (Tensor): tensor to split. - chunks (int): number of chunks to return. - dim (int): dimension along which to split the tensor. - """ - if dim < 0: - dim += tensor.dim() - split_size = (tensor.size(dim) + chunks - 1) // chunks - return split(tensor, split_size, dim)
    - - -
    [docs]def stack(sequence, dim=0): - """Concatenates sequence of tensors along a new dimension. - - All tensors need to be of the same size. - - Arguments: - sqequence (Sequence): sequence of tensors to concatenate. - dim (int): dimension to insert. Has to be between 0 and the number - of dimensions of concatenated tensors (inclusive). - """ - if len(sequence) == 0: - raise TypeError("stack expects a non-empty sequence of tensors") - if dim < 0: - dim += sequence[0].dim() - return torch.cat(list(t.unsqueeze(dim) for t in sequence), dim)
    - - -
    [docs]def unbind(tensor, dim=0): - """Removes a tensor dimension. - - Returns a tuple of all slices along a given dimension, already without it. - - Arguments: - tensor (Tensor): tensor to unbind. - dim (int): dimension to remove. - """ - return tuple(tensor.select(dim, i) for i in _range(tensor.size(dim)))
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/multiprocessing.html b/docs/_modules/torch/multiprocessing.html deleted file mode 100644 index 0993a9fafecf..000000000000 --- a/docs/_modules/torch/multiprocessing.html +++ /dev/null @@ -1,624 +0,0 @@ - - - - - - - - - - - torch.multiprocessing — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.multiprocessing

    -"""
    -torch.multiprocessing is a wrapper around the native :mod:`multiprocessing`
    -module. It registers custom reducers, that use shared memory to provide shared
    -views on the same data in different processes. Once the tensor/storage is moved
    -to shared_memory (see :func:`~torch.Tensor.share_memory_`), it will be possible
    -to send it to other processes without making any copies.
    -
    -The API is 100% compatible with the original module - it's enough to change
    -``import multiprocessing`` to ``import torch.multiprocessing`` to have all the
    -tensors sent through the queues or shared via other mechanisms, moved to shared
    -memory.
    -
    -Because of the similarity of APIs we do not document most of this package
    -contents, and we recommend referring to very good docs of the original module.
    -"""
    -import sys
    -from .reductions import init_reductions
    -import multiprocessing
    -
    -__all__ = ['set_sharing_strategy', 'get_sharing_strategy',
    -           'get_all_sharing_strategies']
    -
    -
    -from multiprocessing import *
    -
    -
    -__all__ += multiprocessing.__all__
    -
    -
    -if sys.version_info < (3, 3):
    -    """Override basic classes in Python 2.7 and Python 3.3 to use ForkingPickler
    -    for serialization. Later versions of Python already use ForkingPickler."""
    -    from .queue import Queue, SimpleQueue
    -    from .pool import Pool
    -
    -
    -if sys.platform == 'darwin':
    -    _sharing_strategy = 'file_system'
    -    _all_sharing_strategies = {'file_system'}
    -else:
    -    _sharing_strategy = 'file_descriptor'
    -    _all_sharing_strategies = {'file_descriptor', 'file_system'}
    -
    -
    -
    [docs]def set_sharing_strategy(new_strategy): - """Sets the strategy for sharing CPU tensors. - - Arguments: - new_strategy (str): Name of the selected strategy. Should be one of - the values returned by :func:`get_all_sharing_strategies()`. - """ - global _sharing_strategy - assert new_strategy in _all_sharing_strategies - _sharing_strategy = new_strategy
    - - -
    [docs]def get_sharing_strategy(): - """Returns the current strategy for sharing CPU tensors.""" - return _sharing_strategy
    - - -
    [docs]def get_all_sharing_strategies(): - """Returns a set of sharing strategies supported on a current system.""" - return _all_sharing_strategies
    - - -init_reductions() -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/functional.html b/docs/_modules/torch/nn/functional.html deleted file mode 100644 index deb51c0cc15d..000000000000 --- a/docs/_modules/torch/nn/functional.html +++ /dev/null @@ -1,1131 +0,0 @@ - - - - - - - - - - - torch.nn.functional — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.functional

    -"""Functional interface"""
    -
    -import torch
    -from . import _functions
    -from .modules import utils
    -from ._functions.padding import ConstantPad2d
    -from .modules.utils import _single, _pair, _triple
    -
    -# Convolutions
    -ConvNd = torch._C._functions.ConvNd
    -
    -
    -
    [docs]def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, - groups=1): - """Applies a 2D convolution over an input image composed of several input - planes. - - See :class:`~torch.nn.Conv2d` for details and output shape. - - Args: - input: input tensor (minibatch x in_channels x iH x iW) - weight: filters tensor (out_channels, in_channels/groups, kH, kW) - bias: optional bias tensor (out_channels) - stride: the stride of the convolving kernel. Can be a single number or - a tuple (sh x sw). Default: 1 - padding: implicit zero padding on the input. Can be a single number or - a tuple. Default: 0 - groups: split input into groups, in_channels should be divisible by - the number of groups - - Examples: - >>> # With square kernels and equal stride - >>> filters = autograd.Variable(torch.randn(8,4,3,3)) - >>> inputs = autograd.Variable(torch.randn(1,4,5,5)) - >>> F.conv2d(inputs, filters, padding=1) - """ - f = ConvNd(_pair(stride), _pair(padding), _pair(dilation), False, - _pair(0), groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.enabled) - return f(input, weight, bias)
    - - -
    [docs]def conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, - groups=1): - """Applies a 1D convolution over an input signal composed of several input - planes. - - See :class:`~torch.nn.Conv1d` for details and output shape. - - Args: - input: input tensor of shape (minibatch x in_channels x iW) - weight: filters of shape (out_channels, in_channels, kW) - bias: optional bias of shape (out_channels) - stride: the stride of the convolving kernel, default 1 - - Examples: - >>> filters = autograd.Variable(torch.randn(33, 16, 3)) - >>> inputs = autograd.Variable(torch.randn(20, 16, 50)) - >>> F.conv1d(inputs, filters) - """ - f = ConvNd(_single(stride), _single(padding), _single(dilation), False, - _single(0), groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.enabled) - return f(input, weight, bias)
    - - -
    [docs]def conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, - groups=1): - """Applies a 3D convolution over an input image composed of several input - planes. - - See :class:`~torch.nn.Conv3d` for details and output shape. - - Args: - input: input tensor of shape (minibatch x in_channels x iT x iH x iW) - weight: filters tensor of shape (out_channels, in_channels, kT, kH, kW) - bias: optional bias tensor of shape (out_channels) - stride: the stride of the convolving kernel. Can be a single number or - a tuple (st x sh x sw). Default: 1 - padding: implicit zero padding on the input. Can be a single number or - a tuple. Default: 0 - - Examples: - >>> filters = autograd.Variable(torch.randn(33, 16, 3, 3, 3)) - >>> inputs = autograd.Variable(torch.randn(20, 16, 50, 10, 20)) - >>> F.conv3d(inputs, filters) - """ - f = ConvNd(_triple(stride), _triple(padding), _triple(dilation), False, - _triple(0), groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.enabled) - return f(input, weight, bias)
    - - -
    [docs]def conv_transpose1d(input, weight, bias=None, stride=1, padding=0, - output_padding=0, groups=1): - f = ConvNd(_single(stride), _single(padding), _single(1), True, - _single(output_padding), groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.enabled) - return f(input, weight, bias)
    - - -
    [docs]def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, - output_padding=0, groups=1): - """Applies a 2D transposed convolution operator over an input image - composed of several input planes, sometimes also called "deconvolution". - - See :class:`~torch.nn.ConvTranspose2d` for details and output shape. - - Args: - input: input tensor of shape (minibatch x in_channels x iH x iW) - weight: filters of shape (in_channels x out_channels x kH x kW) - bias: optional bias of shape (out_channels) - stride: the stride of the convolving kernel, a single number or a - tuple (sh x sw). Default: 1 - padding: implicit zero padding on the input, a single number or a - tuple (padh x padw). Default: 0 - groups: split input into groups, in_channels should be divisible by - the number of groups - output_padding: A zero-padding of 0 <= padding < stride that should be - added to the output. Can be a single number or a tuple. Default: 0 - """ - f = ConvNd(_pair(stride), _pair(padding), _pair(1), True, - _pair(output_padding), groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.enabled) - return f(input, weight, bias)
    - - -
    [docs]def conv_transpose3d(input, weight, bias=None, stride=1, padding=0, - output_padding=0, groups=1): - """Applies a 3D transposed convolution operator over an input image - composed of several input planes, sometimes also called "deconvolution" - - See :class:`~torch.nn.ConvTranspose3d` for details and output shape. - - Args: - input: input tensor of shape (minibatch x in_channels x iT x iH x iW) - weight: filters of shape (in_channels x out_channels x kH x kW) - bias: optional bias of shape (out_channels) - stride: the stride of the convolving kernel, a single number or a - tuple (sh x sw). Default: 1 - padding: implicit zero padding on the input, a single number or a - tuple (padh x padw). Default: 0 - """ - f = ConvNd(_triple(stride), _triple(padding), _triple(1), True, - _triple(output_padding), groups, torch.backends.cudnn.benchmark, torch.backends.cudnn.enabled) - return f(input, weight, bias)
    - - -# Pooling -
    [docs]def avg_pool1d(input, kernel_size, stride=None, padding=0, - ceil_mode=False, count_include_pad=True): - r"""Applies a 1D average pooling over an input signal composed of several - input planes. - - See :class:`~torch.nn.AvgPool1d` for details and output shape. - - Args: - kernel_size: the size of the window - stride: the stride of the window. Default value is :attr:`kernel_size` - padding: implicit zero padding to be added on both sides - ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape - count_include_pad: when True, will include the zero-padding in the averaging calculation - - Example: - >>> # pool of square window of size=3, stride=2 - >>> input = Variable(torch.Tensor([[[1,2,3,4,5,6,7]]])) - >>> F.avg_pool1d(input, kernel_size=3, stride=2) - Variable containing: - (0 ,.,.) = - 2 4 6 - [torch.FloatTensor of size 1x1x3] - """ - if input.dim() != 3: - raise ValueError('expected 3D input (got {} dimensions)' - .format(input.dim())) - kernel_size = _single(kernel_size) + (1,) - stride = _single(stride) + (1,) if stride is not None else kernel_size - padding = _single(padding) + (0,) - f = _functions.thnn.AvgPool2d(kernel_size, stride, padding, - ceil_mode, count_include_pad) - return f(input.unsqueeze(3)).squeeze(3)
    - - -
    [docs]def avg_pool2d(input, kernel_size, stride=None, padding=0, - ceil_mode=False, count_include_pad=True): - """Applies 2D average-pooling operation in kh x kw regions by step size - dh x dw steps. The number of output features is equal to the number of - input planes. - - See :class:`~torch.nn.AvgPool2d` for details and output shape. - - Args: - input: input tensor (minibatch x in_channels x iH x iW) - kernel_size: size of the pooling region, a single number or a - tuple (kh x kw) - stride: stride of the pooling operation, a single number or a - tuple (sh x sw). Default is equal to kernel size - padding: implicit zero padding on the input, a single number or - a tuple (padh x padw), Default: 0 - ceil_mode: operation that defines spatial output shape - count_include_pad: divide by the number of elements inside the - original non-padded image or kh * kw - """ - return _functions.thnn.AvgPool2d(kernel_size, stride, padding, - ceil_mode, count_include_pad)(input)
    - - -
    [docs]def avg_pool3d(input, kernel_size, stride=None): - """Applies 3D average-pooling operation in kt x kh x kw regions by step - size kt x dh x dw steps. The number of output features is equal to the - number of input planes / dt. - """ - return _functions.thnn.AvgPool3d(kernel_size, stride)(input)
    - - -# share the same interface -
    [docs]def max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1, - ceil_mode=False, return_indices=False): - return _functions.thnn.MaxPool1d(kernel_size, stride, padding, dilation, - return_indices, ceil_mode)(input)
    - - -
    [docs]def max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1, - ceil_mode=False, return_indices=False): - return _functions.thnn.MaxPool2d(kernel_size, stride, padding, dilation, - return_indices, ceil_mode)(input)
    - - -
    [docs]def max_pool3d(input, kernel_size, stride=None, padding=0, dilation=1, - ceil_mode=False, return_indices=False): - return _functions.thnn.MaxPool3d(kernel_size, stride, padding, dilation, - return_indices, ceil_mode)(input)
    - - -def _unpool_output_size(input, kernel_size, stride, padding, output_size): - input_size = input.size() - default_size = [] - for d in range(len(kernel_size)): - default_size.append((input_size[d + 2] - 1) * stride[d] + - kernel_size[d] - 2 * padding[d]) - if output_size is None: - return default_size - - output_size = list(output_size) - if len(output_size) == len(kernel_size) + 2: - output_size = output_size[2:] - if len(output_size) != len(kernel_size): - raise ValueError("output_size should be a sequence containing " - "{} or {} elements, but it has a length of '{}'" - .format(len(kernel_size), len(kernel_size) + 2, - len(output_size))) - for d in range(len(kernel_size)): - min_size = default_size[d] - stride[d] - max_size = default_size[d] + stride[d] - if not (min_size < output_size[d] < max_size): - raise ValueError( - 'invalid output_size "{}" (dim {} must be between {} and {})' - .format(output_size, d, min_size, max_size)) - - return output_size - - -
    [docs]def max_unpool1d(input, indices, kernel_size, stride=None, padding=0, - output_size=None): - kernel_size = _single(kernel_size) - stride = _single(stride) - padding = _single(padding) - output_size = _unpool_output_size(input, kernel_size, stride, padding, - output_size) - f = _functions.thnn.MaxUnpool2d(output_size + [1]) - return f(input.unsqueeze(3), indices.unsqueeze(3)).squeeze(3)
    - - -
    [docs]def max_unpool2d(input, indices, kernel_size, stride=None, padding=0, - output_size=None): - kernel_size = _pair(kernel_size) - stride = _pair(stride) - padding = _pair(padding) - output_size = _unpool_output_size(input, kernel_size, stride, padding, - output_size) - f = _functions.thnn.MaxUnpool2d(output_size) - return f(input, indices)
    - - -
    [docs]def max_unpool3d(input, indices, kernel_size, stride=None, padding=0, - output_size=None): - kernel_size = _triple(kernel_size) - stride = _triple(stride) - padding = _triple(padding) - output_size = _unpool_output_size(input, kernel_size, stride, padding, - output_size) - f = _functions.thnn.MaxUnpool3d(output_size, stride, padding) - return f(input, indices)
    - - -
    [docs]def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False): - kw, kh = utils._pair(kernel_size) - out = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode) - return out.mul(kw * kh).pow(1. / norm_type)
    - - -# Activation functions - -
    [docs]def dropout(input, p=0.5, training=False, inplace=False): - return _functions.dropout.Dropout(p, training, inplace)(input)
    - - -
    [docs]def threshold(input, threshold, value, inplace=False): - return _functions.thnn.auto.Threshold(threshold, value, inplace)(input)
    - - -
    [docs]def relu(input, inplace=False): - return _functions.thnn.auto.Threshold(0, 0, inplace)(input)
    - - -
    [docs]def hardtanh(input, min_val=-1., max_val=1., inplace=False): - return _functions.thnn.auto.Hardtanh(min_val, max_val, inplace)(input)
    - - -
    [docs]def relu6(input, inplace=False): - return _functions.thnn.auto.Hardtanh(0, 6, inplace)(input)
    - - -
    [docs]def elu(input, alpha=1., inplace=False): - return _functions.thnn.auto.ELU(alpha, inplace)(input)
    - - -
    [docs]def leaky_relu(input, negative_slope=1e-2, inplace=False): - return _functions.thnn.auto.LeakyReLU(negative_slope, inplace)(input)
    - - -
    [docs]def prelu(input, weight): - return _functions.thnn.PReLU()(input, weight)
    - - -
    [docs]def rrelu(input, lower=1. / 8, upper=1. / 3, training=False, inplace=False): - return _functions.thnn.RReLU(lower, upper, training, inplace)(input)
    - - -
    [docs]def logsigmoid(input): - return _functions.thnn.LogSigmoid()(input)
    - - -
    [docs]def hardshrink(input, lambd=0.5): - return _functions.thnn.auto.Hardshrink(lambd)(input)
    - - -
    [docs]def tanhshrink(input): - return input - torch.tanh(input)
    - - -
    [docs]def softsign(input): - return _functions.activation.Softsign()(input)
    - - -
    [docs]def softplus(input, beta=1, threshold=20): - return _functions.thnn.auto.Softplus(beta, threshold)(input)
    - - -
    [docs]def softmin(input): - return _functions.thnn.Softmin()(input)
    - - -
    [docs]def softmax(input): - return _functions.thnn.auto.Softmax()(input)
    - - -
    [docs]def softshrink(input, lambd=0.5): - return _functions.thnn.auto.Softshrink(lambd)(input)
    - - -
    [docs]def log_softmax(input): - return _functions.thnn.LogSoftmax()(input)
    - - -
    [docs]def tanh(input): - return torch.tanh(input)
    - - -
    [docs]def sigmoid(input): - return torch.sigmoid(input)
    - - -# etc. - -
    [docs]def linear(input, weight, bias=None): - state = _functions.linear.Linear() - return bias and state(input, weight, bias) or state(input, weight)
    - - -
    [docs]def batch_norm(input, running_mean, running_var, weight=None, bias=None, - training=False, momentum=0.1, eps=1e-5): - f = torch._C._functions.BatchNorm(running_mean, running_var, training, momentum, eps, torch.backends.cudnn.enabled) - return f(input, weight, bias)
    - - -# loss - -
    [docs]def nll_loss(input, target, weight=None, size_average=True): - r"""The negative log likelihood loss. - - See :class:`~torch.nn.NLLLoss` for details. - - Args: - input: :math:`(N, C)` where `C = number of classes` - target: :math:`(N)` where each value is `0 <= targets[i] <= C-1` - weight (Variable, optional): a manual rescaling weight given to each - class. If given, has to be a Variable of size "nclasses" - size_average (bool, optional): By default, the losses are averaged - over observations for each minibatch. However, if the field - sizeAverage is set to False, the losses are instead summed - for each minibatch. - - Attributes: - weight: the class-weights given as input to the constructor - - Example: - >>> # input is of size nBatch x nClasses = 3 x 5 - >>> input = autograd.Variable(torch.randn(3, 5)) - >>> # each element in target has to have 0 <= value < nclasses - >>> target = autograd.Variable(torch.LongTensor([1, 0, 4])) - >>> output = F.nll_loss(F.log_softmax(input), target) - >>> output.backward() - """ - dim = input.dim() - if dim == 2: - f = _functions.thnn.NLLLoss(size_average, weight=weight) - elif dim == 4: - f = _functions.thnn.NLLLoss2d(size_average, weight=weight) - else: - raise ValueError('Expected 2 or 4 dimensions (got {})'.format(dim)) - return f(input, target)
    - - -
    [docs]def kl_div(input, target, size_average=True): - r"""The `Kullback-Leibler divergence`_ Loss. - - See :class:`~torch.nn.KLDivLoss` for details. - - Args: - input: Variable of arbitrary shape - target: Variable of the same shape as input - size_average: if True the output is divided by the number of elements - in input tensor - """ - return _functions.thnn.KLDivLoss(size_average)(input, target)
    - - -
    [docs]def cross_entropy(input, target, weight=None, size_average=True): - r"""This criterion combines `log_softmax` and `nll_loss` in one single class. - - See :class:`torch.nn.CrossEntropyLoss` for details. - - Args: - input: Variable :math:`(N, C)` where `C = number of classes` - target: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` - weight (Tensor, optional): a manual rescaling weight given to each - class. If given, has to be a Tensor of size "nclasses" - size_average (bool, optional): By default, the losses are averaged - over observations for each minibatch. However, if the field - sizeAverage is set to False, the losses are instead summed - for each minibatch. - """ - return nll_loss(log_softmax(input), target, weight, size_average)
    - - -
    [docs]def binary_cross_entropy(input, target, weight=None, size_average=True): - r"""Function that measures the Binary Cross Entropy - between the target and the output: - - See :class:`~torch.nn.BCELoss` for details. - - Args: - input: Variable of arbitrary shape - target: Variable of the same shape as input - weight (Variable, optional): a manual rescaling weight - if provided it's repeated to match input tensor shape - size_average (bool, optional): By default, the losses are averaged - over observations for each minibatch. However, if the field - sizeAverage is set to False, the losses are instead summed - for each minibatch. - """ - return _functions.thnn.BCELoss(size_average, weight=weight)(input, target)
    - - -
    [docs]def smooth_l1_loss(input, target, size_average=True): - return _functions.thnn.SmoothL1Loss(size_average)(input, target)
    - - -
    [docs]def pixel_shuffle(input, upscale_factor): - r"""Rearranges elements in a tensor of shape ``[*, C*r^2, H, W]`` to a - tensor of shape ``[C, H*r, W*r]``. - - See :class:`~torch.nn.PixelShuffle` for details. - - Args: - input (Variable): Input - upscale_factor (int): factor to increase spatial resolution by - - Examples: - >>> ps = nn.PixelShuffle(3) - >>> input = autograd.Variable(torch.Tensor(1, 9, 4, 4)) - >>> output = ps(input) - >>> print(output.size()) - torch.Size([1, 1, 12, 12]) - """ - batch_size, channels, in_height, in_width = input.size() - channels //= upscale_factor ** 2 - - out_height = in_height * upscale_factor - out_width = in_width * upscale_factor - - input_view = input.contiguous().view( - batch_size, channels, upscale_factor, upscale_factor, - in_height, in_width) - - shuffle_out = input_view.permute(0, 1, 4, 2, 5, 3).contiguous() - return shuffle_out.view(batch_size, channels, out_height, out_width)
    - - -def upsample_nearest(input, size=None, scale_factor=None): - """Upsamples the input, using nearest neighbours' pixel values. - - Currently only spatial upsampling is supported (i.e. expected inputs - are 4 dimensional). - - Args: - input (Variable): input - size (int or Tuple[int, int]): output spatial size. - scale_factor (int): multiplier for spatial size. Has to be an integer. - """ - return _functions.thnn.UpsamplingNearest2d(size, scale_factor)(input) - - -def upsample_bilinear(input, size=None, scale_factor=None): - """Upscales the input, using the bilinear upsampling. - - Currently only spatial upsampling is supported (i.e. expected inputs - are 4 dimensional). - - Args: - input (Variable): input - size (int or Tuple[int, int]): output spatial size. - scale_factor (int): multiplier for spatial size. Has to be an integer. - """ - return _functions.thnn.UpsamplingBilinear2d(size, scale_factor)(input) - - -
    [docs]def pad(input, pad, mode='constant', value=0): - """Pads tensor. - - Currently only 2D and 3D padding supported. - In case of 4D input tensor pad should be in form (pad_l, pad_r, pad_t, pad_b ) - In case of 5D pad should be (pleft, pright, ptop, pbottom, pfront, pback) - - Args: - input (Variable): 4D or 5D tensor - pad (tuple): 4-elem or 6-elem tuple - mode: 'constant', 'reflect' or 'replicate' - value: fill value for 'constant' padding - """ - if input.dim() == 4: - assert len(pad) == 4, '4D tensors expect 4 values for padding' - if mode == 'constant': - return ConstantPad2d(pad, value)(input) - elif mode == 'reflect': - return _functions.thnn.ReflectionPad2d(*pad)(input) - elif mode == 'replicate': - return _functions.thnn.ReplicationPad2d(*pad)(input) - elif input.dim() == 5: - assert len(pad) == 6, '5D tensors expect 6 values for padding' - if mode == 'constant': - raise NotImplementedError - elif mode == 'reflect': - raise NotImplementedError - elif mode == 'replicate': - return _functions.thnn.ReplicationPad3d(*pad)(input) - else: - raise NotImplementedError("Only 4D and 5D padding is supported for now")
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/init.html b/docs/_modules/torch/nn/init.html deleted file mode 100644 index 7bf4f278fd09..000000000000 --- a/docs/_modules/torch/nn/init.html +++ /dev/null @@ -1,813 +0,0 @@ - - - - - - - - - - - torch.nn.init — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.init

    -import math
    -import random
    -
    -import torch
    -from torch.autograd import Variable
    -
    -
    -
    [docs]def uniform(tensor, a=0, b=1): - """Fills the input Tensor or Variable with values drawn from a uniform U(a,b) - - Args: - tensor: a n-dimension torch.Tensor - a: the lower bound of the uniform distribution - b: the upper bound of the uniform distribution - - Examples: - >>> w = torch.Tensor(3, 5) - >>> nn.init.uniform(w) - """ - if isinstance(tensor, Variable): - uniform(tensor.data, a=a, b=b) - return tensor - return tensor.uniform_(a, b)
    - - -
    [docs]def normal(tensor, mean=0, std=1): - """Fills the input Tensor or Variable with values drawn from a normal distribution with the given mean and std - - Args: - tensor: a n-dimension torch.Tensor - mean: the mean of the normal distribution - std: the standard deviation of the normal distribution - - Examples: - >>> w = torch.Tensor(3, 5) - >>> nn.init.normal(w) - """ - if isinstance(tensor, Variable): - normal(tensor.data, mean=mean, std=std) - return tensor - return tensor.normal_(mean, std)
    - - -
    [docs]def constant(tensor, val): - """Fills the input Tensor or Variable with the value `val` - - Args: - tensor: a n-dimension torch.Tensor - val: the value to fill the tensor with - - Examples: - >>> w = torch.Tensor(3, 5) - >>> nn.init.constant(w) - """ - if isinstance(tensor, Variable): - constant(tensor.data, val) - return tensor - return tensor.fill_(val)
    - - -def _calculate_fan_in_and_fan_out(tensor): - if tensor.ndimension() < 2: - raise ValueError("fan in and fan out can not be computed for tensor of size ", tensor.size()) - - if tensor.ndimension() == 2: # Linear - fan_in = tensor.size(1) - fan_out = tensor.size(0) - else: - num_input_fmaps = tensor.size(1) - num_output_fmaps = tensor.size(0) - receptive_field_size = 1 - if tensor.dim() > 2: - receptive_field_size = tensor[0][0].numel() - fan_in = num_input_fmaps * receptive_field_size - fan_out = num_output_fmaps * receptive_field_size - - return fan_in, fan_out - - -
    [docs]def xavier_uniform(tensor, gain=1): - """Fills the input Tensor or Variable with values according to the method described in "Understanding the - difficulty of training deep feedforward neural networks" - Glorot, X. and Bengio, Y., using a uniform - distribution. The resulting tensor will have values sampled from U(-a, a) where a = gain * sqrt(2/(fan_in + - fan_out)) * sqrt(3) - - Args: - tensor: a n-dimension torch.Tensor - gain: an optional scaling factor to be applied - - Examples: - >>> w = torch.Tensor(3, 5) - >>> nn.init.xavier_uniform(w, gain=math.sqrt(2.0)) - """ - if isinstance(tensor, Variable): - xavier_uniform(tensor.data, gain=gain) - return tensor - - fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) - std = gain * math.sqrt(2.0 / (fan_in + fan_out)) - a = math.sqrt(3.0) * std - return tensor.uniform_(-a, a)
    - - -
    [docs]def xavier_normal(tensor, gain=1): - """Fills the input Tensor or Variable with values according to the method described in "Understanding the - difficulty of training deep feedforward neural networks" - Glorot, X. and Bengio, Y., using a normal - distribution. The resulting tensor will have values sampled from normal distribution with mean=0 and std = gain * - sqrt(2/(fan_in + fan_out)) - - Args: - tensor: a n-dimension torch.Tensor - gain: an optional scaling factor to be applied - - Examples: - >>> w = torch.Tensor(3, 5) - >>> nn.init.xavier_normal(w) - """ - if isinstance(tensor, Variable): - xavier_normal(tensor.data, gain=gain) - return tensor - - fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) - std = gain * math.sqrt(2.0 / (fan_in + fan_out)) - return tensor.normal_(0, std)
    - - -def _calculate_correct_fan(tensor, mode): - mode = mode.lower() - valid_modes = ['fan_in', 'fan_out'] - if mode not in valid_modes: - raise ValueError("mode {} not supported, please use one of {}".format(mode, valid_modes)) - - fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) - if mode == 'fan_in': - return fan_in - else: - return fan_out - - -
    [docs]def kaiming_uniform(tensor, a=0, mode='fan_in'): - """Fills the input Tensor or Variable with values according to the method described in "Delving deep into - rectifiers: Surpassing human-level performance on ImageNet classification" - He, K. et al using a uniform - distribution. The resulting tensor will have values sampled from U(-bound, bound) where bound = sqrt(2/((1 + a^2) - * fan_in)) * sqrt(3) - - Args: - tensor: a n-dimension torch.Tensor - a: the coefficient of the slope of the rectifier used after this layer (0 for ReLU by default) - mode: either 'fan_in' (default) or 'fan_out'. Choosing `fan_in` preserves the magnitude of the variance of the - weights in the forward pass. Choosing `fan_out` preserves the magnitudes in the backwards pass. - - Examples: - >>> w = torch.Tensor(3, 5) - >>> nn.init.kaiming_uniform(w, mode='fan_in') - """ - if isinstance(tensor, Variable): - kaiming_uniform(tensor.data, a=a, mode=mode) - return tensor - - fan = _calculate_correct_fan(tensor, mode) - std = math.sqrt(2.0 / ((1 + a ** 2) * fan)) - bound = math.sqrt(3.0) * std - return tensor.uniform_(-bound, bound)
    - - -
    [docs]def kaiming_normal(tensor, a=0, mode='fan_in'): - """Fills the input Tensor or Variable with values according to the method described in "Delving deep into - rectifiers: Surpassing human-level performance on ImageNet classification" - He, K. et al using a normal - distribution. The resulting tensor will have values sampled from normal distribution with mean=0 and std = sqrt( - 2/((1 + a^2) * fan_in)) - - Args: - tensor: a n-dimension torch.Tensor - a: the coefficient of the slope of the rectifier used after this layer (0 for ReLU by default) - mode: either 'fan_in' (default) or 'fan_out'. Choosing `fan_in` preserves the magnitude of the variance of the - weights in the forward pass. Choosing `fan_out` preserves the magnitudes in the backwards pass. - - Examples: - >>> w = torch.Tensor(3, 5) - >>> nn.init.kaiming_normal(w, mode='fan_out') - """ - if isinstance(tensor, Variable): - kaiming_normal(tensor.data, a=a, mode=mode) - return tensor - - fan = _calculate_correct_fan(tensor, mode) - std = math.sqrt(2.0 / ((1 + a ** 2) * fan)) - return tensor.normal_(0, std)
    - - -
    [docs]def orthogonal(tensor, gain=1): - """Fills the input Tensor or Variable with a (semi) orthogonal matrix. The input tensor must have at least 2 - dimensions, and for tensors with more than 2 dimensions the trailing dimensions are flattened. viewed as 2D - representation with rows equal to the first dimension and columns equal to the product of as a sparse matrix, - where the non-zero elements will be drawn from a normal distribution with mean=0 and std=`std`. Reference: "Exact - solutions to the nonlinear dynamics of learning in deep linear neural networks"-Saxe, A. et al. - - Args: - tensor: a n-dimension torch.Tensor, where n >= 2 - gain: optional gain to be applied - - Examples: - >>> w = torch.Tensor(3, 5) - >>> nn.init.orthogonal(w) - """ - if isinstance(tensor, Variable): - orthogonal(tensor.data, gain=gain) - return tensor - - if tensor.ndimension() < 2: - raise ValueError("Only tensors with 2 or more dimensions are supported.") - rows = tensor.size(0) - cols = tensor[0].numel() - flattened = torch.Tensor(rows, cols).normal_(0, 1) - - u, s, v = torch.svd(flattened, some=True) - if u.is_same_size(flattened): - tensor.view_as(u).copy_(u) - else: - tensor.view_as(v.t()).copy_(v.t()) - - tensor.mul_(gain) - return tensor
    - - -
    [docs]def sparse(tensor, sparsity, std=0.01): - """Fills the 2D input Tensor or Variable as a sparse matrix, where the non-zero elements will be drawn from a - normal distribution with mean=0 and std=`std`. - - Args: - tensor: a n-dimension torch.Tensor - sparsity: The fraction of elements in each column to be set to zero - std: the standard deviation of the normal distribution used to generate the non-zero values - - Examples: - >>> w = torch.Tensor(3, 5) - >>> nn.init.sparse(w, sparsity=0.1) - """ - if isinstance(tensor, Variable): - sparse(tensor.data, sparsity, std=std) - return tensor - - if tensor.ndimension() != 2: - raise ValueError("Sparse initialization only supported for 2D inputs") - tensor.normal_(0, std) - rows, cols = tensor.size(0), tensor.size(1) - num_zeros = int(math.ceil(cols * sparsity)) - - for col_idx in range(tensor.size(1)): - row_indices = list(range(rows)) - random.shuffle(row_indices) - zero_indices = row_indices[:num_zeros] - for row_idx in zero_indices: - tensor[row_idx, col_idx] = 0 - - return tensor
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/modules/activation.html b/docs/_modules/torch/nn/modules/activation.html deleted file mode 100644 index b5ed14b77e16..000000000000 --- a/docs/_modules/torch/nn/modules/activation.html +++ /dev/null @@ -1,1180 +0,0 @@ - - - - - - - - - - - torch.nn.modules.activation — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.modules.activation

    -import torch
    -from torch.nn.parameter import Parameter
    -
    -from .module import Module
    -from .. import functional as F
    -
    -
    -
    [docs]class Threshold(Module): - """Thresholds each element of the input Tensor - - Threshold is defined as:: - - y = x if x >= threshold - value if x < threshold - - Args: - threshold: The value to threshold at - value: The value to replace with - inplace: can optionally do the operation in-place - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.Threshold(0.1, 20) - >>> input = Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def __init__(self, threshold, value, inplace=False): - super(Threshold, self).__init__() - self.threshold = threshold - self.value = value - self.inplace = inplace - # TODO: check in THNN (if inplace == True, then assert value <= threshold) - - def forward(self, input): - return F.threshold(input, self.threshold, self.value, self.inplace) - - def __repr__(self): - inplace_str = ', inplace' if self.inplace else '' - return self.__class__.__name__ + ' (' \ - + str(self.threshold) \ - + ', ' + str(self.value) \ - + inplace_str + ')'
    - - -
    [docs]class ReLU(Threshold): - """Applies the rectified linear unit function element-wise :math:`{ReLU}(x)= max(0, x)` - - Args: - inplace: can optionally do the operation in-place - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.ReLU() - >>> input = autograd.Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def __init__(self, inplace=False): - super(ReLU, self).__init__(0, 0, inplace) - - def __repr__(self): - inplace_str = 'inplace' if self.inplace else '' - return self.__class__.__name__ + ' (' \ - + inplace_str + ')'
    - - -class RReLU(Module): - - def __init__(self, lower=1. / 8, upper=1. / 3, inplace=False): - super(RReLU, self).__init__() - self.lower = lower - self.upper = upper - self.inplace = inplace - - def forward(self, input): - return F.rrelu(input, self.lower, self.upper, self.training, self.inplace) - - def __repr__(self): - inplace_str = ', inplace' if self.inplace else '' - return self.__class__.__name__ + ' (' \ - + str(self.lower) \ - + ', ' + str(self.upper) \ - + inplace_str + ')' - - -
    [docs]class Hardtanh(Module): - """Applies the HardTanh function element-wise - - HardTanh is defined as:: - - f(x) = +1, if x > 1 - f(x) = -1, if x < -1 - f(x) = x, otherwise - - The range of the linear region :math:`[-1, 1]` can be adjusted - - Args: - min_value: minimum value of the linear region range - max_value: maximum value of the linear region range - inplace: can optionally do the operation in-place - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.HardTanh(-2, 2) - >>> input = autograd.Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def __init__(self, min_value=-1, max_value=1, inplace=False): - super(Hardtanh, self).__init__() - self.min_val = min_value - self.max_val = max_value - self.inplace = inplace - assert self.max_val > self.min_val - - def forward(self, input): - return F.hardtanh(input, self.min_val, self.max_val, self.inplace) - - def __repr__(self): - inplace_str = ', inplace' if self.inplace else '' - return self.__class__.__name__ + ' (' \ - + 'min_val=' + str(self.min_val) \ - + ', max_val=' + str(self.max_val) \ - + inplace_str + ')'
    - - -
    [docs]class ReLU6(Hardtanh): - """Applies the element-wise function :math:`{ReLU6}(x) = min(max(0,x), 6)` - - Args: - inplace: can optionally do the operation in-place - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.ReLU6() - >>> input = autograd.Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def __init__(self, inplace=False): - super(ReLU6, self).__init__(0, 6, inplace) - - def __repr__(self): - inplace_str = 'inplace' if self.inplace else '' - return self.__class__.__name__ + ' (' \ - + inplace_str + ')'
    - - -
    [docs]class Sigmoid(Module): - """Applies the element-wise function :math:`f(x) = 1 / ( 1 + exp(-x))` - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.Sigmoid() - >>> input = autograd.Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def forward(self, input): - return torch.sigmoid(input) - - def __repr__(self): - return self.__class__.__name__ + ' ()'
    - - -
    [docs]class Tanh(Module): - """Applies element-wise, :math:`f(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))` - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.Tanh() - >>> input = autograd.Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def forward(self, input): - return torch.tanh(input) - - def __repr__(self): - return self.__class__.__name__ + ' ()'
    - - -
    [docs]class ELU(Module): - """Applies element-wise, :math:`f(x) = max(0,x) + min(0, alpha * (exp(x) - 1))` - - Args: - alpha: the alpha value for the ELU formulation - inplace: can optionally do the operation in-place - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.ELU() - >>> input = autograd.Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def __init__(self, alpha=1., inplace=False): - super(ELU, self).__init__() - self.alpha = alpha - self.inplace = inplace - - def forward(self, input): - return F.elu(input, self.alpha, self.inplace) - - def __repr__(self): - inplace_str = ', inplace' if self.inplace else '' - return self.__class__.__name__ + ' (' \ - + 'alpha=' + str(self.alpha) \ - + inplace_str + ')'
    - - -class Hardshrink(Module): - """Applies the hard shrinkage function element-wise - Hardshrink is defined as:: - f(x) = x, if x > lambda - f(x) = x, if x < -lambda - f(x) = 0, otherwise - - Args: - lambd: the lambda value for the Hardshrink formulation. Default: 0.5 - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.Hardshrink() - >>> input = autograd.Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def __init__(self, lambd=0.5): - super(Hardshrink, self).__init__() - self.lambd = lambd - - def forward(self, input): - return F.hardshrink(input, self.lambd) - - def __repr__(self): - return self.__class__.__name__ + ' (' \ - + str(self.lambd) + ')' - - -
    [docs]class LeakyReLU(Module): - """Applies element-wise, :math:`f(x) = max(0, x) + {negative\_slope} * min(0, x)` - - Args: - negative_slope: Controls the angle of the negative slope. Default: 1e-2 - inplace: can optionally do the operation in-place - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.LeakyReLU(0.1) - >>> input = autograd.Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def __init__(self, negative_slope=1e-2, inplace=False): - super(LeakyReLU, self).__init__() - self.negative_slope = negative_slope - self.inplace = inplace - - def forward(self, input): - return F.leaky_relu(input, self.negative_slope, self.inplace) - - def __repr__(self): - inplace_str = ', inplace' if self.inplace else '' - return self.__class__.__name__ + ' (' \ - + str(self.negative_slope) \ - + inplace_str + ')'
    - - -
    [docs]class LogSigmoid(Module): - """Applies element-wise :math:`LogSigmoid(x) = log( 1 / (1 + exp(-x_i)))` - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.LogSigmoid() - >>> input = autograd.Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def forward(self, input): - return F.logsigmoid(input) - - def __repr__(self): - return self.__class__.__name__ + ' ()'
    - - -
    [docs]class Softplus(Module): - """Applies element-wise :math:`f(x) = 1/beta * log(1 + exp(beta * x_i))` - - SoftPlus is a smooth approximation to the ReLU function and can be used - to constrain the output of a machine to always be positive. - - For numerical stability the implementation reverts to the linear function - for inputs above a certain value. - - Args: - beta: the beta value for the Softplus formulation. Default: 1 - threshold: values above this revert to a linear function. Default: 20 - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.Softplus() - >>> input = autograd.Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def __init__(self, beta=1, threshold=20): - super(Softplus, self).__init__() - self.beta = beta - self.threshold = threshold - - def forward(self, input): - return F.softplus(input, self.beta, self.threshold) - - def __repr__(self): - return self.__class__.__name__ + ' (' \ - + 'beta=' + str(self.beta) \ - + ', threshold=' + str(self.threshold) + ')'
    - - -
    [docs]class Softshrink(Module): - """Applies the soft shrinkage function elementwise - - SoftShrinkage operator is defined as:: - - f(x) = x-lambda, if x > lambda > f(x) = x+lambda, if x < -lambda - f(x) = 0, otherwise - - Args: - lambd: the lambda value for the Softshrink formulation. Default: 0.5 - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.Softshrink() - >>> input = autograd.Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def __init__(self, lambd=0.5): - super(Softshrink, self).__init__() - self.lambd = lambd - - def forward(self, input): - return F.softshrink(input, self.lambd) - - def __repr__(self): - return self.__class__.__name__ + ' (' \ - + str(self.lambd) + ')'
    - - -
    [docs]class PReLU(Module): - """Applies element-wise the function :math:`PReLU(x) = max(0,x) + a * min(0,x)` - Here "a" is a learnable parameter. - When called without arguments, nn.PReLU() uses a single parameter "a" - across all input channels. If called with nn.PReLU(nChannels), a separate - "a" is used for each input channel. - - - .. note:: - weight decay should not be used when learning "a" for good performance. - - Args: - num_parameters: number of "a" to learn. Default: 1 - init: the initial value of "a". Default: 0.25 - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.PReLU() - >>> input = autograd.Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def __init__(self, num_parameters=1, init=0.25): - self.num_parameters = num_parameters - super(PReLU, self).__init__() - self.weight = Parameter(torch.Tensor(num_parameters).fill_(init)) - - def forward(self, input): - return F.prelu(input, self.weight) - - def __repr__(self): - return self.__class__.__name__ + ' (' \ - + str(self.num_parameters) + ')'
    - - -
    [docs]class Softsign(Module): - """Applies element-wise, the function :math:`f(x) = x / (1 + |x|)` - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.Softsign() - >>> input = autograd.Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def forward(self, input): - return F.softsign(input) - - def __repr__(self): - return self.__class__.__name__ + ' ()'
    - - -
    [docs]class Tanhshrink(Module): - """Applies element-wise, :math:`Tanhshrink(x) = x - Tanh(x)` - - Shape: - - Input: :math:`(N, *)` where `*` means, any number of additional dimensions - - Output: :math:`(N, *)`, same shape as the input - - Examples:: - - >>> m = nn.Tanhshrink() - >>> input = autograd.Variable(torch.randn(2)) - >>> print(input) - >>> print(m(input)) - """ - - def forward(self, input): - return F.tanhshrink(input) - - def __repr__(self): - return self.__class__.__name__ + ' ()'
    - - -
    [docs]class Softmin(Module): - """Applies the Softmin function to an n-dimensional input Tensor - rescaling them so that the elements of the n-dimensional output Tensor - lie in the range `(0, 1)` and sum to 1 - - :math:`f(x) = exp(-x_i - {shift}) / sum_j exp(-x_j - {shift})` - - where :math:`{shift} = max_i - x_i` - - Shape: - - Input: :math:`(N, L)` - - Output: :math:`(N, L)` - - Returns: - a Tensor of the same dimension and shape as the input, with - values in the range [0, 1] - - Examples:: - - >>> m = nn.Softmin() - >>> input = autograd.Variable(torch.randn(2, 3)) - >>> print(input) - >>> print(m(input)) - """ - - def forward(self, input): - return F.softmin(input) - - def __repr__(self): - return self.__class__.__name__ + ' ()'
    - - -
    [docs]class Softmax(Module): - """Applies the Softmax function to an n-dimensional input Tensor - rescaling them so that the elements of the n-dimensional output Tensor - lie in the range (0,1) and sum to 1 - - Softmax is defined as :math:`f_i(x) = exp(x_i - shift) / sum_j exp(x_j - shift)` - where `shift = max_i x_i` - - Shape: - - Input: :math:`(N, L)` - - Output: :math:`(N, L)` - - Returns: - a Tensor of the same dimension and shape as the input with - values in the range [0, 1] - - .. note:: - This module doesn't work directly with NLLLoss, - which expects the Log to be computed between the Softmax and itself. - Use Logsoftmax instead (it's faster). - - Examples:: - - >>> m = nn.Softmax() - >>> input = autograd.Variable(torch.randn(2, 3)) - >>> print(input) - >>> print(m(input)) - """ - - def forward(self, input): - assert input.dim() == 2, 'Softmax requires a 2D tensor as input' - return F.softmax(input) - - def __repr__(self): - return self.__class__.__name__ + ' ()'
    - - -class Softmax2d(Module): - """Applies SoftMax over features to each spatial location - - When given an image of Channels x Height x Width, it will - - apply Softmax to each location :math:`(Channels, h_i, w_j)` - - Shape: - - Input: :math:`(N, C, H, W)` - - Output: :math:`(N, C, H, W)` (same shape as input) - - Returns: - a Tensor of the same dimension and shape as the input with - values in the range [0, 1] - - Examples:: - - >>> m = nn.Softmax2d() - >>> # you softmax over the 2nd dimension - >>> input = autograd.Variable(torch.randn(2, 3, 12, 13)) - >>> print(input) - >>> print(m(input)) - """ - - def forward(self, input): - assert input.dim() == 4, 'Softmax2d requires a 4D tensor as input' - return F.softmax(input) - - def __repr__(self): - return self.__class__.__name__ + ' ()' - - -
    [docs]class LogSoftmax(Module): - """Applies the Log(Softmax(x)) function to an n-dimensional input Tensor. - The LogSoftmax formulation can be simplified as - - :math:`f_i(x) = log(1 / a * exp(x_i))` where :math:`a = sum_j exp(x_j)` - - Shape: - - Input: :math:`(N, L)` - - Output: :math:`(N, L)` - - Returns: - a Tensor of the same dimension and shape as the input with - values in the range [-inf, 0) - - Examples:: - - >>> m = nn.LogSoftmax() - >>> input = autograd.Variable(torch.randn(2, 3)) - >>> print(input) - >>> print(m(input)) - """ - - def forward(self, input): - return F.log_softmax(input) - - def __repr__(self): - return self.__class__.__name__ + ' ()'
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/modules/batchnorm.html b/docs/_modules/torch/nn/modules/batchnorm.html deleted file mode 100644 index 9aeb3743be0f..000000000000 --- a/docs/_modules/torch/nn/modules/batchnorm.html +++ /dev/null @@ -1,731 +0,0 @@ - - - - - - - - - - - torch.nn.modules.batchnorm — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.modules.batchnorm

    -import torch
    -from .module import Module
    -from torch.nn.parameter import Parameter
    -from .. import functional as F
    -
    -
    -# TODO: check contiguous in THNN
    -# TODO: use separate backend functions?
    -class _BatchNorm(Module):
    -
    -    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
    -        super(_BatchNorm, self).__init__()
    -        self.num_features = num_features
    -        self.affine = affine
    -        self.eps = eps
    -        self.momentum = momentum
    -        if self.affine:
    -            self.weight = Parameter(torch.Tensor(num_features))
    -            self.bias = Parameter(torch.Tensor(num_features))
    -        else:
    -            self.register_parameter('weight', None)
    -            self.register_parameter('bias', None)
    -        self.register_buffer('running_mean', torch.zeros(num_features))
    -        self.register_buffer('running_var', torch.ones(num_features))
    -        self.reset_parameters()
    -
    -    def reset_parameters(self):
    -        self.running_mean.zero_()
    -        self.running_var.fill_(1)
    -        if self.affine:
    -            self.weight.data.uniform_()
    -            self.bias.data.zero_()
    -
    -    def _check_input_dim(self, input):
    -        if input.size(1) != self.running_mean.nelement():
    -            raise ValueError('got {}-feature tensor, expected {}'
    -                             .format(input.size(1), self.num_features))
    -
    -    def forward(self, input):
    -        self._check_input_dim(input)
    -        return F.batch_norm(
    -            input, self.running_mean, self.running_var, self.weight, self.bias,
    -            self.training, self.momentum, self.eps)
    -
    -    def __repr__(self):
    -        return ('{name}({num_features}, eps={eps}, momentum={momentum},'
    -                ' affine={affine})'
    -                .format(name=self.__class__.__name__, **self.__dict__))
    -
    -
    -
    [docs]class BatchNorm1d(_BatchNorm): - r"""Applies Batch Normalization over a 2d or 3d input that is seen as a mini-batch. - - .. math:: - - y = \frac{x - mean[x]}{ \sqrt{Var[x]} + \epsilon} * gamma + beta - - The mean and standard-deviation are calculated per-dimension over - the mini-batches and gamma and beta are learnable parameter vectors - of size N (where N is the input size). - - During training, this layer keeps a running estimate of its computed mean - and variance. The running sum is kept with a default momentum of 0.1. - - During evaluation, this running mean/variance is used for normalization. - - Args: - num_features: num_features from an expected input of size `batch_size x num_features [x width]` - eps: a value added to the denominator for numerical stability. Default: 1e-5 - momentum: the value used for the running_mean and running_var computation. Default: 0.1 - affine: a boolean value that when set to true, gives the layer learnable affine parameters. - - Shape: - - Input: :math:`(N, C)` or :math:`(N, C, L)` - - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input) - - Examples: - >>> # With Learnable Parameters - >>> m = nn.BatchNorm1d(100) - >>> # Without Learnable Parameters - >>> m = nn.BatchNorm1d(100, affine=False) - >>> input = autograd.Variable(torch.randn(20, 100)) - >>> output = m(input) - """ - - def _check_input_dim(self, input): - if input.dim() != 2 and input.dim() != 3: - raise ValueError('expected 2D or 3D input (got {}D input)' - .format(input.dim())) - super(BatchNorm1d, self)._check_input_dim(input)
    - - -
    [docs]class BatchNorm2d(_BatchNorm): - r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch of 3d inputs - - .. math:: - - y = \frac{x - mean[x]}{ \sqrt{Var[x]} + \epsilon} * gamma + beta - - The mean and standard-deviation are calculated per-dimension over - the mini-batches and gamma and beta are learnable parameter vectors - of size N (where N is the input size). - - During training, this layer keeps a running estimate of its computed mean - and variance. The running sum is kept with a default momentum of 0.1. - - During evaluation, this running mean/variance is used for normalization. - - Args: - num_features: num_features from an expected input of size batch_size x num_features x height x width - eps: a value added to the denominator for numerical stability. Default: 1e-5 - momentum: the value used for the running_mean and running_var computation. Default: 0.1 - affine: a boolean value that when set to true, gives the layer learnable affine parameters. - - Shape: - - Input: :math:`(N, C, H, W)` - - Output: :math:`(N, C, H, W)` (same shape as input) - - Examples: - >>> # With Learnable Parameters - >>> m = nn.BatchNorm2d(100) - >>> # Without Learnable Parameters - >>> m = nn.BatchNorm2d(100, affine=False) - >>> input = autograd.Variable(torch.randn(20, 100, 35, 45)) - >>> output = m(input) - """ - - def _check_input_dim(self, input): - if input.dim() != 4: - raise ValueError('expected 4D input (got {}D input)' - .format(input.dim())) - super(BatchNorm2d, self)._check_input_dim(input)
    - - -
    [docs]class BatchNorm3d(_BatchNorm): - r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch of 4d inputs - - .. math:: - - y = \frac{x - mean[x]}{ \sqrt{Var[x]} + \epsilon} * gamma + beta - - The mean and standard-deviation are calculated per-dimension over - the mini-batches and gamma and beta are learnable parameter vectors - of size N (where N is the input size). - - During training, this layer keeps a running estimate of its computed mean - and variance. The running sum is kept with a default momentum of 0.1. - - During evaluation, this running mean/variance is used for normalization. - - Args: - num_features: num_features from an expected input of size batch_size x num_features x height x width - eps: a value added to the denominator for numerical stability. Default: 1e-5 - momentum: the value used for the running_mean and running_var computation. Default: 0.1 - affine: a boolean value that when set to true, gives the layer learnable affine parameters. - - Shape: - - Input: :math:`(N, C, D, H, W)` - - Output: :math:`(N, C, D, H, W)` (same shape as input) - - Examples: - >>> # With Learnable Parameters - >>> m = nn.BatchNorm3d(100) - >>> # Without Learnable Parameters - >>> m = nn.BatchNorm3d(100, affine=False) - >>> input = autograd.Variable(torch.randn(20, 100, 35, 45, 10)) - >>> output = m(input) - """ - - def _check_input_dim(self, input): - if input.dim() != 5: - raise ValueError('expected 5D input (got {}D input)' - .format(input.dim())) - super(BatchNorm3d, self)._check_input_dim(input)
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/modules/container.html b/docs/_modules/torch/nn/modules/container.html deleted file mode 100644 index 88e24913f1dc..000000000000 --- a/docs/_modules/torch/nn/modules/container.html +++ /dev/null @@ -1,760 +0,0 @@ - - - - - - - - - - - torch.nn.modules.container — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.modules.container

    -from collections import OrderedDict
    -import string
    -import torch
    -import warnings
    -from .module import Module
    -
    -
    -class Container(Module):
    -
    -    def __init__(self, **kwargs):
    -        super(Container, self).__init__()
    -        # DeprecationWarning is ignored by default <sigh>
    -        warnings.warn("nn.Container is deprecated. All of it's functionality "
    -                      "is now implemented in nn.Module. Subclass that instead.")
    -        for key, value in kwargs.items():
    -            self.add_module(key, value)
    -
    -
    -
    [docs]class Sequential(Module): - """A sequential container. - Modules will be added to it in the order they are passed in the constructor. - Alternatively, an ordered dict of modules can also be passed in. - - To make it easier to understand, given is a small example:: - - # Example of using Sequential - model = nn.Sequential( - nn.Conv2d(1,20,5), - nn.ReLU(), - nn.Conv2d(20,64,5), - nn.ReLU() - ) - - # Example of using Sequential with OrderedDict - model = nn.Sequential(OrderedDict([ - ('conv1', nn.Conv2d(1,20,5)), - ('relu1', nn.ReLU()), - ('conv2', nn.Conv2d(20,64,5)), - ('relu2', nn.ReLU()) - ])) - """ - - def __init__(self, *args): - super(Sequential, self).__init__() - if len(args) == 1 and isinstance(args[0], OrderedDict): - for key, module in args[0].items(): - self.add_module(key, module) - else: - idx = 0 - for module in args: - self.add_module(str(idx), module) - idx += 1 - - def __getitem__(self, idx): - if idx < 0 or idx >= len(self._modules): - raise IndexError('index {} is out of range'.format(idx)) - it = iter(self._modules.values()) - for i in range(idx): - next(it) - return next(it) - - def forward(self, input): - for module in self._modules.values(): - input = module(input) - return input
    - - -
    [docs]class ModuleList(Module): - """Holds submodules in a list. - - ModuleList can be indexed like a regular Python list, but modules it contains - are properly registered, and will be visible by all Module methods. - - Arguments: - modules (list, optional): a list of modules to add - - Example:: - - class MyModule(nn.Module): - def __init__(self): - super(MyModule, self).__init__() - self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)]) - - def forward(self, x): - # ModuleList can act as an iterable, or be indexed using ints - for i, l in enumerate(self.linears): - x = self.linears[i // 2](x) + l(x) - return x - """ - - def __init__(self, modules=None): - super(ModuleList, self).__init__() - if modules is not None: - self += modules - - def __getitem__(self, idx): - if idx < 0: - idx += len(self) - return self._modules[str(idx)] - - def __setitem__(self, idx, module): - return setattr(self, str(idx), module) - - def __len__(self): - return len(self._modules) - - def __iter__(self): - return iter(self._modules.values()) - - def __iadd__(self, modules): - return self.extend(modules) - -
    [docs] def append(self, module): - """Appends a given module at the end of the list. - - Arguments: - module (nn.Module): module to append - """ - self.add_module(str(len(self)), module) - return self
    - -
    [docs] def extend(self, modules): - """Appends modules from a Python list at the end. - - Arguments: - modules (list): list of modules to append - """ - if not isinstance(modules, list): - raise TypeError("ModuleList.extend should be called with a " - "list, but got " + type(modules).__name__) - offset = len(self) - for i, module in enumerate(modules): - self.add_module(str(offset + i), module) - return self
    - - -
    [docs]class ParameterList(Module): - """Holds submodules in a list. - - ParameterList can be indexed like a regular Python list, but parameters it contains - are properly registered, and will be visible by all Module methods. - - Arguments: - modules (list, optional): a list of :class:`nn.Parameter`` to add - - Example:: - - class MyModule(nn.Module): - def __init__(self): - super(MyModule, self).__init__() - self.params = nn.ParameterList([nn.Parameter(torch.randn(10, 10)) for i in range(10)]) - - def forward(self, x): - # ModuleList can act as an iterable, or be indexed using ints - for i, p in enumerate(self.params): - x = self.params[i // 2].mm(x) + p.mm(x) - return x - """ - - def __init__(self, parameters=None): - super(ParameterList, self).__init__() - if parameters is not None: - self += parameters - - def __getitem__(self, idx): - if idx < 0: - idx += len(self) - return self._parameters[str(idx)] - - def __setitem__(self, idx, param): - return self.register_parameter(str(idx), param) - - def __len__(self): - return len(self._parameters) - - def __iter__(self): - return iter(self._parameters.values()) - - def __iadd__(self, parameters): - return self.extend(parameters) - -
    [docs] def append(self, parameter): - """Appends a given parameter at the end of the list. - - Arguments: - parameter (nn.Parameter): parameter to append - """ - self.register_parameter(str(len(self)), parameter) - return self
    - -
    [docs] def extend(self, parameters): - """Appends parameters from a Python list at the end. - - Arguments: - parameters (list): list of parameters to append - """ - if not isinstance(parameters, list): - raise TypeError("ParameterList.extend should be called with a " - "list, but got " + type(parameters).__name__) - offset = len(self) - for i, param in enumerate(parameters): - self.register_parameter(str(offset + i), param) - return self
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/modules/conv.html b/docs/_modules/torch/nn/modules/conv.html deleted file mode 100644 index 48f2ae6ec61e..000000000000 --- a/docs/_modules/torch/nn/modules/conv.html +++ /dev/null @@ -1,1175 +0,0 @@ - - - - - - - - - - - torch.nn.modules.conv — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.modules.conv

    -import math
    -import torch
    -from torch.nn.parameter import Parameter
    -from .. import functional as F
    -from .module import Module
    -from .utils import _single, _pair, _triple
    -
    -
    -class _ConvNd(Module):
    -
    -    def __init__(self, in_channels, out_channels, kernel_size, stride,
    -                 padding, dilation, transposed, output_padding, groups, bias):
    -        super(_ConvNd, self).__init__()
    -        if in_channels % groups != 0:
    -            raise ValueError('in_channels must be divisible by groups')
    -        if out_channels % groups != 0:
    -            raise ValueError('out_channels must be divisible by groups')
    -        self.in_channels = in_channels
    -        self.out_channels = out_channels
    -        self.kernel_size = kernel_size
    -        self.stride = stride
    -        self.padding = padding
    -        self.dilation = dilation
    -        self.transposed = transposed
    -        self.output_padding = output_padding
    -        self.groups = groups
    -        if transposed:
    -            self.weight = Parameter(torch.Tensor(
    -                in_channels, out_channels // groups, *kernel_size))
    -        else:
    -            self.weight = Parameter(torch.Tensor(
    -                out_channels, in_channels // groups, *kernel_size))
    -        if bias:
    -            self.bias = Parameter(torch.Tensor(out_channels))
    -        else:
    -            self.register_parameter('bias', None)
    -        self.reset_parameters()
    -
    -    def reset_parameters(self):
    -        n = self.in_channels
    -        for k in self.kernel_size:
    -            n *= k
    -        stdv = 1. / math.sqrt(n)
    -        self.weight.data.uniform_(-stdv, stdv)
    -        if self.bias is not None:
    -            self.bias.data.uniform_(-stdv, stdv)
    -
    -    def __repr__(self):
    -        s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}'
    -             ', stride={stride}')
    -        if self.padding != (0,) * len(self.padding):
    -            s += ', padding={padding}'
    -        if self.dilation != (1,) * len(self.dilation):
    -            s += ', dilation={dilation}'
    -        if self.output_padding != (0,) * len(self.output_padding):
    -            s += ', output_padding={output_padding}'
    -        if self.groups != 1:
    -            s += ', groups={groups}'
    -        if self.bias is None:
    -            s += ', bias=False'
    -        s += ')'
    -        return s.format(name=self.__class__.__name__, **self.__dict__)
    -
    -
    -
    [docs]class Conv1d(_ConvNd): - r"""Applies a 1D convolution over an input signal composed of several input - planes. - - In the simplest case, the output value of the layer with input size :math:`(N, C_{in}, L)` - and output :math:`(N, C_{out}, L_{out})` can be precisely described as: - - .. math:: - - \begin{array}{ll} - out(N_i, C_{out_j}) = bias(C_{out_j}) - + \sum_{{k}=0}^{C_{in}-1} weight(C_{out_j}, k) \star input(N_i, k) - \end{array} - - where :math:`\star` is the valid `cross-correlation`_ operator - - | :attr:`stride` controls the stride for the cross-correlation. - | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides - for :attr:`padding` number of points - | :attr:`dilation` controls the spacing between the kernel points. It is harder to describe, - but this `link`_ has a nice visualization of what :attr:`dilation` does. - | :attr:`groups` controls the connections between inputs and outputs. - | At groups=1, all inputs are convolved to all outputs. - | At groups=2, the operation becomes equivalent to having two conv layers - side by side, each seeing half the input channels, - and producing half the output channels, and both subsequently concatenated. - - .. note:: - - Depending of the size of your kernel, several (of the last) - columns of the input might be lost, because it is a valid `cross-correlation`_, - and not a full `cross-correlation`_. - It is up to the user to add proper padding. 
- - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the convolution - kernel_size (int or tuple): Size of the convolving kernel - stride (int or tuple, optional): Stride of the convolution - padding (int or tuple, optional): Zero-padding added to both sides of the input - dilation (int or tuple, optional): Spacing between kernel elements - groups (int, optional): Number of blocked connections from input channels to output channels - bias (bool, optional): If True, adds a learnable bias to the output - - Shape: - - Input: :math:`(N, C_{in}, L_{in})` - - Output: :math:`(N, C_{out}, L_{out})` where - :math:`L_{out} = floor((L_{in} + 2 * padding - dilation * (kernel\_size - 1) - 1) / stride + 1)` - - Attributes: - weight (Tensor): the learnable weights of the module of shape (out_channels, in_channels, kernel_size) - bias (Tensor): the learnable bias of the module of shape (out_channels) - - Examples:: - - >>> m = nn.Conv1d(16, 33, 3, stride=2) - >>> input = autograd.Variable(torch.randn(20, 16, 50)) - >>> output = m(input) - - .. _cross-correlation: - https://en.wikipedia.org/wiki/Cross-correlation - - .. _link: - https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md - """ - - def __init__(self, in_channels, out_channels, kernel_size, stride=1, - padding=0, dilation=1, groups=1, bias=True): - kernel_size = _single(kernel_size) - stride = _single(stride) - padding = _single(padding) - dilation = _single(dilation) - super(Conv1d, self).__init__( - in_channels, out_channels, kernel_size, stride, padding, dilation, - False, _single(0), groups, bias) - - def forward(self, input): - return F.conv1d(input, self.weight, self.bias, self.stride, - self.padding, self.dilation, self.groups)
    - - -
    [docs]class Conv2d(_ConvNd): - r"""Applies a 2D convolution over an input signal composed of several input - planes. - - In the simplest case, the output value of the layer with input size :math:`(N, C_{in}, H, W)` - and output :math:`(N, C_{out}, H_{out}, W_{out})` can be precisely described as: - - .. math:: - - \begin{array}{ll} - out(N_i, C_{out_j}) = bias(C_{out_j}) - + \sum_{{k}=0}^{C_{in}-1} weight(C_{out_j}, k) \star input(N_i, k) - \end{array} - - where :math:`\star` is the valid 2D `cross-correlation`_ operator - - | :attr:`stride` controls the stride for the cross-correlation. - | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides - for :attr:`padding` number of points - | :attr:`dilation` controls the spacing between the kernel points. It is harder to describe, - but this `link`_ has a nice visualization of what :attr:`dilation` does. - | :attr:`groups` controls the connections between inputs and outputs. - | At groups=1, all inputs are convolved to all outputs. - | At groups=2, the operation becomes equivalent to having two conv layers - side by side, each seeing half the input channels, - and producing half the output channels, and both subsequently concatenated. - - The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be: - - - a single ``int`` -- in which case the same value is used for the height and width dimension - - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension, - and the second `int` for the width dimension - - .. note:: - - Depending of the size of your kernel, several (of the last) - columns of the input might be lost, because it is a valid `cross-correlation`_, - and not a full `cross-correlation`_. - It is up to the user to add proper padding. 
- - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the convolution - kernel_size (int or tuple): Size of the convolving kernel - stride (int or tuple, optional): Stride of the convolution - padding (int or tuple, optional): Zero-padding added to both sides of the input - dilation (int or tuple, optional): Spacing between kernel elements - groups (int, optional): Number of blocked connections from input channels to output channels - bias (bool, optional): If True, adds a learnable bias to the output - - Shape: - - Input: :math:`(N, C_{in}, H_{in}, W_{in})` - - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where - :math:`H_{out} = floor((H_{in} + 2 * padding[0] - dilation[0] * (kernel\_size[0] - 1) - 1) / stride[0] + 1)` - :math:`W_{out} = floor((W_{in} + 2 * padding[1] - dilation[1] * (kernel\_size[1] - 1) - 1) / stride[1] + 1)` - - Attributes: - weight (Tensor): the learnable weights of the module of shape - (out_channels, in_channels, kernel_size[0], kernel_size[1]) - bias (Tensor): the learnable bias of the module of shape (out_channels) - - Examples:: - - >>> # With square kernels and equal stride - >>> m = nn.Conv2d(16, 33, 3, stride=2) - >>> # non-square kernels and unequal stride and with padding - >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2)) - >>> # non-square kernels and unequal stride and with padding and dilation - >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1)) - >>> input = autograd.Variable(torch.randn(20, 16, 50, 100)) - >>> output = m(input) - - .. _cross-correlation: - https://en.wikipedia.org/wiki/Cross-correlation - - .. 
_link: - https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md - """ - - def __init__(self, in_channels, out_channels, kernel_size, stride=1, - padding=0, dilation=1, groups=1, bias=True): - kernel_size = _pair(kernel_size) - stride = _pair(stride) - padding = _pair(padding) - dilation = _pair(dilation) - super(Conv2d, self).__init__( - in_channels, out_channels, kernel_size, stride, padding, dilation, - False, _pair(0), groups, bias) - - def forward(self, input): - return F.conv2d(input, self.weight, self.bias, self.stride, - self.padding, self.dilation, self.groups)
    - - -
    [docs]class Conv3d(_ConvNd): - r"""Applies a 3D convolution over an input signal composed of several input - planes. - - In the simplest case, the output value of the layer with input size :math:`(N, C_{in}, D, H, W)` - and output :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` can be precisely described as: - - .. math:: - - \begin{array}{ll} - out(N_i, C_{out_j}) = bias(C_{out_j}) - + \sum_{{k}=0}^{C_{in}-1} weight(C_{out_j}, k) \star input(N_i, k) - \end{array} - - where :math:`\star` is the valid 3D `cross-correlation`_ operator - - | :attr:`stride` controls the stride for the cross-correlation. - | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides - for :attr:`padding` number of points - | :attr:`dilation` controls the spacing between the kernel points. It is harder to describe, - but this `link`_ has a nice visualization of what :attr:`dilation` does. - | :attr:`groups` controls the connections between inputs and outputs. - | At groups=1, all inputs are convolved to all outputs. - | At groups=2, the operation becomes equivalent to having two conv layers - side by side, each seeing half the input channels, - and producing half the output channels, and both subsequently concatenated. - - The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be: - - - a single ``int`` -- in which case the same value is used for the height and width dimension - - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension, - the second `int` for the height dimension and the third `int` for the width dimension - - .. note:: - - Depending of the size of your kernel, several (of the last) - columns of the input might be lost, because it is a valid `cross-correlation`_, - and not a full `cross-correlation`_. - It is up to the user to add proper padding. 
- - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the convolution - kernel_size (int or tuple): Size of the convolving kernel - stride (int or tuple, optional): Stride of the convolution - padding (int or tuple, optional): Zero-padding added to both sides of the input - dilation (int or tuple, optional): Spacing between kernel elements - groups (int, optional): Number of blocked connections from input channels to output channels - bias (bool, optional): If True, adds a learnable bias to the output - - Shape: - - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` - - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` where - :math:`D_{out} = floor((D_{in} + 2 * padding[0] - dilation[0] * (kernel\_size[0] - 1) - 1) / stride[0] + 1)` - :math:`H_{out} = floor((H_{in} + 2 * padding[1] - dilation[1] * (kernel\_size[1] - 1) - 1) / stride[1] + 1)` - :math:`W_{out} = floor((W_{in} + 2 * padding[2] - dilation[2] * (kernel\_size[2] - 1) - 1) / stride[2] + 1)` - - Attributes: - weight (Tensor): the learnable weights of the module of shape - (out_channels, in_channels, kernel_size[0], kernel_size[1], kernel_size[2]) - bias (Tensor): the learnable bias of the module of shape (out_channels) - - Examples:: - - >>> # With square kernels and equal stride - >>> m = nn.Conv3d(16, 33, 3, stride=2) - >>> # non-square kernels and unequal stride and with padding - >>> m = nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0)) - >>> input = autograd.Variable(torch.randn(20, 16, 10, 50, 100)) - >>> output = m(input) - - .. _cross-correlation: - https://en.wikipedia.org/wiki/Cross-correlation - - .. 
_link: - https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md - """ - - def __init__(self, in_channels, out_channels, kernel_size, stride=1, - padding=0, dilation=1, groups=1, bias=True): - kernel_size = _triple(kernel_size) - stride = _triple(stride) - padding = _triple(padding) - dilation = _triple(dilation) - super(Conv3d, self).__init__( - in_channels, out_channels, kernel_size, stride, padding, dilation, - False, _triple(0), groups, bias) - - def forward(self, input): - return F.conv3d(input, self.weight, self.bias, self.stride, - self.padding, self.dilation, self.groups)
    - - -class _ConvTransposeMixin(object): - - def forward(self, input, output_size=None): - output_padding = self._output_padding(input, output_size) - func = self._backend.ConvNd( - self.stride, self.padding, self.dilation, self.transposed, - output_padding, self.groups) - if self.bias is None: - return func(input, self.weight) - else: - return func(input, self.weight, self.bias) - - def _output_padding(self, input, output_size): - if output_size is None: - return self.output_padding - - output_size = list(output_size) - k = input.dim() - 2 - if len(output_size) == k + 2: - output_size = output_size[-2:] - if len(output_size) != k: - raise ValueError( - "output_size must have {} or {} elements (got {})" - .format(k, k + 2, len(output_size))) - - def dim_size(d): - return ((input.size(d + 2) - 1) * self.stride[d] - - 2 * self.padding[d] + self.kernel_size[d]) - - min_sizes = [dim_size(d) for d in range(k)] - max_sizes = [min_sizes[d] + self.stride[d] - 1 for d in range(k)] - for size, min_size, max_size in zip(output_size, min_sizes, max_sizes): - if size < min_size or size > max_size: - raise ValueError(( - "requested an output size of {}, but valid sizes range " - "from {} to {} (for an input of {})").format( - output_size, min_sizes, max_sizes, input.size()[2:])) - - return tuple([output_size[d] - min_sizes[d] for d in range(k)]) - - -
    [docs]class ConvTranspose1d(_ConvTransposeMixin, _ConvNd): - """Applies a 1D transposed convolution operator over an input image - composed of several input planes. - - This module can be seen as the gradient of Conv1d with respect to its input. - It is sometimes (but incorrectly) refered to as a deconvolutional operation. - - .. note:: - - Depending of the size of your kernel, several (of the last) - columns of the input might be lost, because it is a valid `cross-correlation`_, - and not a full `cross-correlation`_. - It is up to the user to add proper padding. - - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the convolution - kernel_size (int or tuple): Size of the convolving kernel - stride (int or tuple, optional): Stride of the convolution - padding (int or tuple, optional): Zero-padding added to both sides of the input - output_padding (int or tuple, optional): Zero-padding added to one side of the output - groups (int, optional): Number of blocked connections from input channels to output channels - bias (bool, optional): If True, adds a learnable bias to the output - - Shape: - - Input: :math:`(N, C_{in}, L_{in})` - - Output: :math:`(N, C_{out}, L_{out})` where - :math:`L_{out} = (L_{in} - 1) * stride - 2 * padding + kernel\_size + output\_padding` - - Attributes: - weight (Tensor): the learnable weights of the module of shape - (in_channels, out_channels, kernel_size[0], kernel_size[1]) - bias (Tensor): the learnable bias of the module of shape (out_channels) - """ - - def __init__(self, in_channels, out_channels, kernel_size, stride=1, - padding=0, output_padding=0, groups=1, bias=True): - kernel_size = _single(kernel_size) - stride = _single(stride) - padding = _single(padding) - dilation = _single(1) - output_padding = _single(output_padding) - super(ConvTranspose1d, self).__init__( - in_channels, out_channels, kernel_size, stride, padding, dilation, - True, output_padding, 
groups, bias) - - def forward(self, input, output_size=None): - output_padding = self._output_padding(input, output_size) - return F.conv_transpose1d( - input, self.weight, self.bias, self.stride, self.padding, - output_padding, self.groups)
    - - -
    [docs]class ConvTranspose2d(_ConvTransposeMixin, _ConvNd): - r"""Applies a 2D transposed convolution operator over an input image - composed of several input planes. - - This module can be seen as the gradient of Conv2d with respect to its input. - It is sometimes (but incorrectly) refered to as a deconvolutional operation. - - | :attr:`stride` controls the stride for the cross-correlation. - | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides - for :attr:`padding` number of points - | If :attr:`output_padding` is non-zero, then the output is implicitly zero-padded on one side - for :attr:`output_padding` number of points - | :attr:`dilation` controls the spacing between the kernel points. It is harder to describe, - but this `link`_ has a nice visualization of what :attr:`dilation` does. - | :attr:`groups` controls the connections between inputs and outputs. - | At groups=1, all inputs are convolved to all outputs. - | At groups=2, the operation becomes equivalent to having two conv layers - side by side, each seeing half the input channels, - and producing half the output channels, and both subsequently concatenated. - - The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding` - can either be: - - - a single ``int`` -- in which case the same value is used for the height and width dimension - - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension, - and the second `int` for the width dimension - - .. note:: - - Depending of the size of your kernel, several (of the last) - columns of the input might be lost, because it is a valid `cross-correlation`_, - and not a full `cross-correlation`_. - It is up to the user to add proper padding. 
- - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the convolution - kernel_size (int or tuple): Size of the convolving kernel - stride (int or tuple, optional): Stride of the convolution - padding (int or tuple, optional): Zero-padding added to both sides of the input - output_padding (int or tuple, optional): Zero-padding added to one side of the output - groups (int, optional): Number of blocked connections from input channels to output channels - bias (bool, optional): If True, adds a learnable bias to the output - - Shape: - - Input: :math:`(N, C_{in}, H_{in}, W_{in})` - - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where - :math:`H_{out} = (H_{in} - 1) * stride[0] - 2 * padding[0] + kernel\_size[0] + output\_padding[0]` - :math:`W_{out} = (W_{in} - 1) * stride[1] - 2 * padding[1] + kernel\_size[1] + output\_padding[1]` - - Attributes: - weight (Tensor): the learnable weights of the module of shape - (in_channels, out_channels, kernel_size[0], kernel_size[1]) - bias (Tensor): the learnable bias of the module of shape (out_channels) - - Examples:: - - >>> # With square kernels and equal stride - >>> m = nn.ConvTranspose2d(16, 33, 3, stride=2) - >>> # non-square kernels and unequal stride and with padding - >>> m = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2)) - >>> input = autograd.Variable(torch.randn(20, 16, 50, 100)) - >>> output = m(input) - >>> # exact output size can be also specified as an argument - >>> input = autograd.Variable(torch.randn(1, 16, 12, 12)) - >>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1) - >>> upsample = nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1) - >>> h = downsample(input) - >>> h.size() - torch.Size([1, 16, 6, 6]) - >>> output = upsample(h, output_size=input.size()) - >>> output.size() - torch.Size([1, 16, 12, 12]) - - .. _cross-correlation: - https://en.wikipedia.org/wiki/Cross-correlation - - .. 
_link: - https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md - """ - - def __init__(self, in_channels, out_channels, kernel_size, stride=1, - padding=0, output_padding=0, groups=1, bias=True): - kernel_size = _pair(kernel_size) - stride = _pair(stride) - padding = _pair(padding) - dilation = _pair(1) - output_padding = _pair(output_padding) - super(ConvTranspose2d, self).__init__( - in_channels, out_channels, kernel_size, stride, padding, dilation, - True, output_padding, groups, bias) - - def forward(self, input, output_size=None): - output_padding = self._output_padding(input, output_size) - return F.conv_transpose2d( - input, self.weight, self.bias, self.stride, self.padding, - output_padding, self.groups)
    - - -
    [docs]class ConvTranspose3d(_ConvTransposeMixin, _ConvNd): - r"""Applies a 3D transposed convolution operator over an input image composed of several input - planes. - The transposed convolution operator multiplies each input value element-wise by a learnable kernel, - and sums over the outputs from all input feature planes. - - **This module can be seen as the exact reverse of Conv3d**. - It is sometimes (but incorrectly) refered to as a deconvolutional operation. - - | :attr:`stride` controls the stride for the cross-correlation. - | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides - for :attr:`padding` number of points - | If :attr:`output_padding` is non-zero, then the output is implicitly zero-padded on one side - for :attr:`output_padding` number of points - | :attr:`groups` controls the connections between inputs and outputs. - | At groups=1, all inputs are convolved to all outputs. - | At groups=2, the operation becomes equivalent to having two conv layers - side by side, each seeing half the input channels, - and producing half the output channels, and both subsequently concatenated. - - The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding` - can either be: - - - a single ``int`` -- in which case the same value is used for the height and width dimension - - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension, - the second `int` for the width dimension and the third `int` for the width dimension - - .. note:: - - Depending of the size of your kernel, several (of the last) - columns of the input might be lost, because it is a valid `cross-correlation`_, - and not a full `cross-correlation`_. - It is up to the user to add proper padding. 
- - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the convolution - kernel_size (int or tuple): Size of the convolving kernel - stride (int or tuple, optional): Stride of the convolution - padding (int or tuple, optional): Zero-padding added to both sides of the input - output_padding (int or tuple, optional): Zero-padding added to one side of the output - groups (int, optional): Number of blocked connections from input channels to output channels - bias (bool, optional): If True, adds a learnable bias to the output - - Shape: - - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` - - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` where - :math:`D_{out} = (D_{in} - 1) * stride[0] - 2 * padding[0] + kernel\_size[0] + output\_padding[0]` - :math:`H_{out} = (H_{in} - 1) * stride[1] - 2 * padding[1] + kernel\_size[1] + output\_padding[1]` - :math:`W_{out} = (W_{in} - 1) * stride[2] - 2 * padding[2] + kernel\_size[2] + output\_padding[2]` - - Attributes: - weight (Tensor): the learnable weights of the module of shape - (in_channels, out_channels, kernel_size[0], kernel_size[1], kernel_size[2]) - bias (Tensor): the learnable bias of the module of shape (out_channels) - - Examples:: - - >>> # With square kernels and equal stride - >>> m = nn.ConvTranspose3d(16, 33, 3, stride=2) - >>> # non-square kernels and unequal stride and with padding - >>> m = nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(0, 4, 2)) - >>> input = autograd.Variable(torch.randn(20, 16, 10, 50, 100)) - >>> output = m(input) - - .. _cross-correlation: - https://en.wikipedia.org/wiki/Cross-correlation - - .. 
_link: - https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md - """ - - def __init__(self, in_channels, out_channels, kernel_size, stride=1, - padding=0, output_padding=0, groups=1, bias=True): - kernel_size = _triple(kernel_size) - stride = _triple(stride) - padding = _triple(padding) - dilation = _triple(1) - output_padding = _triple(output_padding) - super(ConvTranspose3d, self).__init__( - in_channels, out_channels, kernel_size, stride, padding, dilation, - True, output_padding, groups, bias) - - def forward(self, input, output_size=None): - output_padding = self._output_padding(input, output_size) - return F.conv_transpose3d( - input, self.weight, self.bias, self.stride, self.padding, - output_padding, self.groups)
    - - -# TODO: Conv2dLocal -# TODO: Conv2dMap -# TODO: ConvTranspose2dMap -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/modules/dropout.html b/docs/_modules/torch/nn/modules/dropout.html deleted file mode 100644 index 62aeb573e26a..000000000000 --- a/docs/_modules/torch/nn/modules/dropout.html +++ /dev/null @@ -1,700 +0,0 @@ - - - - - - - - - - - torch.nn.modules.dropout — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.modules.dropout

    -from .module import Module
    -from .. import functional as F
    -
    -
    -
    [docs]class Dropout(Module): - r"""Randomly zeroes some of the elements of the input tensor. - The elements to zero are randomized on every forward call. - - Args: - p: probability of an element to be zeroed. Default: 0.5 - inplace: If set to True, will do this operation in-place. Default: false - - Shape: - - Input: `Any`. Input can be of any shape - - Output: `Same`. Output is of the same shape as input - - Examples:: - - >>> m = nn.Dropout(p=0.2) - >>> input = autograd.Variable(torch.randn(20, 16)) - >>> output = m(input) - """ - - def __init__(self, p=0.5, inplace=False): - super(Dropout, self).__init__() - if p < 0 or p > 1: - raise ValueError("dropout probability has to be between 0 and 1, " - "but got {}".format(p)) - self.p = p - self.inplace = inplace - - def forward(self, input): - return F.dropout(input, self.p, self.training, self.inplace) - - def __repr__(self): - inplace_str = ', inplace' if self.inplace else '' - return self.__class__.__name__ + ' (' \ - + 'p = ' + str(self.p) \ - + inplace_str + ')'
    - - -
    [docs]class Dropout2d(Module): - r"""Randomly zeroes whole channels of the input tensor. - The channels to zero-out are randomized on every forward call. - - *Usually the input comes from Conv2d modules.* - - As described in the paper - `Efficient Object Localization Using Convolutional Networks`_ , - if adjacent pixels within feature maps are strongly correlated - (as is normally the case in early convolution layers) then iid dropout - will not regularize the activations and will otherwise just result - in an effective learning rate decrease. - - In this case, :func:`nn.Dropout2d` will help promote independence between - feature maps and should be used instead. - - Args: - p (float, optional): probability of an element to be zeroed. - inplace (bool, optional): If set to True, will do this operation in-place - - Shape: - - Input: :math:`(N, C, H, W)` - - Output: :math:`(N, C, H, W)` (same shape as input) - - Examples:: - - >>> m = nn.Dropout2d(p=0.2) - >>> input = autograd.Variable(torch.randn(20, 16, 32, 32)) - >>> output = m(input) - - .. _Efficient Object Localization Using Convolutional Networks: - http://arxiv.org/abs/1411.4280 - """ - - def __init__(self, p=0.5, inplace=False): - super(Dropout2d, self).__init__() - if p < 0 or p > 1: - raise ValueError("dropout probability has to be between 0 and 1, " - "but got {}".format(p)) - self.p = p - self.inplace = inplace - - def forward(self, input): - return self._backend.Dropout2d(self.p, self.training, self.inplace)(input) - - def __repr__(self): - inplace_str = ', inplace' if self.inplace else '' - return self.__class__.__name__ + ' (' \ - + 'p=' + str(self.p) \ - + inplace_str + ')'
    - - -
    [docs]class Dropout3d(Module): - r"""Randomly zeroes whole channels of the input tensor. - The channels to zero are randomized on every forward call. - - *Usually the input comes from Conv3d modules.* - - As described in the paper - `Efficient Object Localization Using Convolutional Networks`_ , - if adjacent pixels within feature maps are strongly correlated - (as is normally the case in early convolution layers) then iid dropout - will not regularize the activations and will otherwise just result - in an effective learning rate decrease. - - In this case, :func:`nn.Dropout3d` will help promote independence between - feature maps and should be used instead. - - Args: - p (float, optional): probability of an element to be zeroed. - inplace (bool, optional): If set to True, will do this operation in-place - - Shape: - - Input: :math:`(N, C, D, H, W)` - - Output: :math:`(N, C, D, H, W)` (same shape as input) - - Examples:: - - >>> m = nn.Dropout3d(p=0.2) - >>> input = autograd.Variable(torch.randn(20, 16, 4, 32, 32)) - >>> output = m(input) - - .. _Efficient Object Localization Using Convolutional Networks: - http://arxiv.org/abs/1411.4280 - """ - - def __init__(self, p=0.5, inplace=False): - super(Dropout3d, self).__init__() - if p < 0 or p > 1: - raise ValueError("dropout probability has to be between 0 and 1, " - "but got {}".format(p)) - self.p = p - self.inplace = inplace - - def forward(self, input): - return self._backend.Dropout3d(self.p, self.training, self.inplace)(input) - - def __repr__(self): - inplace_str = ', inplace' if self.inplace else '' - return self.__class__.__name__ + ' (' \ - + 'p=' + str(self.p) \ - + inplace_str + ')'
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/modules/linear.html b/docs/_modules/torch/nn/modules/linear.html deleted file mode 100644 index 84c5bb36f9d6..000000000000 --- a/docs/_modules/torch/nn/modules/linear.html +++ /dev/null @@ -1,620 +0,0 @@ - - - - - - - - - - - torch.nn.modules.linear — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.modules.linear

    -import math
    -
    -import torch
    -from torch.nn.parameter import Parameter
    -
    -from .module import Module
    -
    -
    -
    [docs]class Linear(Module): - r"""Applies a linear transformation to the incoming data: :math:`y = Ax + b` - - Args: - in_features: size of each input sample - out_features: size of each output sample - bias: If set to False, the layer will not learn an additive bias. Default: True - - Shape: - - Input: :math:`(N, in\_features)` - - Output: :math:`(N, out\_features)` - - Attributes: - weight: the learnable weights of the module of shape (out_features x in_features) - bias: the learnable bias of the module of shape (out_features) - - Examples:: - - >>> m = nn.Linear(20, 30) - >>> input = autograd.Variable(torch.randn(128, 20)) - >>> output = m(input) - >>> print(output.size()) - """ - - def __init__(self, in_features, out_features, bias=True): - super(Linear, self).__init__() - self.in_features = in_features - self.out_features = out_features - self.weight = Parameter(torch.Tensor(out_features, in_features)) - if bias: - self.bias = Parameter(torch.Tensor(out_features)) - else: - self.register_parameter('bias', None) - self.reset_parameters() - - def reset_parameters(self): - stdv = 1. / math.sqrt(self.weight.size(1)) - self.weight.data.uniform_(-stdv, stdv) - if self.bias is not None: - self.bias.data.uniform_(-stdv, stdv) - - def forward(self, input): - if self.bias is None: - return self._backend.Linear()(input, self.weight) - else: - return self._backend.Linear()(input, self.weight, self.bias) - - def __repr__(self): - return self.__class__.__name__ + ' (' \ - + str(self.in_features) + ' -> ' \ - + str(self.out_features) + ')'
    - - -# TODO: Bilinear -# TODO: PartialLinear - maybe in sparse? -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/modules/loss.html b/docs/_modules/torch/nn/modules/loss.html deleted file mode 100644 index 72b7afb7ee0b..000000000000 --- a/docs/_modules/torch/nn/modules/loss.html +++ /dev/null @@ -1,989 +0,0 @@ - - - - - - - - - - - torch.nn.modules.loss — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.modules.loss

    -from torch.autograd import Variable
    -import torch
    -from .module import Module
    -from .container import Sequential
    -from .activation import LogSoftmax
    -from .. import functional as F
    -
    -
    -def _assert_no_grad(variable):
    -    assert not variable.requires_grad, \
    -        "nn criterions don't compute the gradient w.r.t. targets - please " \
    -        "mark these variables as volatile or not requiring gradients"
    -
    -
    -class _Loss(Module):
    -
    -    def __init__(self, size_average=True):
    -        super(_Loss, self).__init__()
    -        self.size_average = size_average
    -
    -    def forward(self, input, target):
    -        _assert_no_grad(target)
    -        backend_fn = getattr(self._backend, type(self).__name__)
    -        return backend_fn(self.size_average)(input, target)
    -
    -
    -class _WeightedLoss(_Loss):
    -
    -    def __init__(self, weight=None, size_average=True):
    -        super(_WeightedLoss, self).__init__(size_average)
    -        self.register_buffer('weight', weight)
    -
    -    def forward(self, input, target):
    -        _assert_no_grad(target)
    -        backend_fn = getattr(self._backend, type(self).__name__)
    -        return backend_fn(self.size_average, weight=self.weight)(input, target)
    -
    -
    -
    [docs]class L1Loss(_Loss): - r"""Creates a criterion that measures the mean absolute value of the - element-wise difference between input `x` and target `y`: - - :math:`{loss}(x, y) = 1/n \sum |x_i - y_i|` - - `x` and `y` arbitrary shapes with a total of `n` elements each. - - The sum operation still operates over all the elements, and divides by `n`. - - The division by `n` can be avoided if one sets the constructor argument `sizeAverage=False` - """ - pass
    - - -
    [docs]class NLLLoss(_WeightedLoss): - r"""The negative log likelihood loss. It is useful to train a classification problem with n classes - - If provided, the optional argument `weights` should be a 1D Tensor assigning - weight to each of the classes. - - This is particularly useful when you have an unbalanced training set. - - The input given through a forward call is expected to contain log-probabilities - of each class: input has to be a 2D Tensor of size `(minibatch, n)` - - Obtaining log-probabilities in a neural network is easily achieved by - adding a `LogSoftmax` layer in the last layer of your network. - - You may use `CrossEntropyLoss` instead, if you prefer not to add an extra layer. - - The target that this loss expects is a class index `(0 to N-1, where N = number of classes)` - - The loss can be described as:: - - loss(x, class) = -x[class] - - or in the case of the weights argument it is specified as follows:: - - loss(x, class) = -weights[class] * x[class] - - Args: - weight (Tensor, optional): a manual rescaling weight given to each class. - If given, has to be a Tensor of size "nclasses" - size_average (bool, optional): By default, the losses are averaged over observations for each minibatch. - However, if the field sizeAverage is set to False, - the losses are instead summed for each minibatch. - - - Shape: - - Input: :math:`(N, C)` where `C = number of classes` - - Target: :math:`(N)` where each value is `0 <= targets[i] <= C-1` - - Attributes: - weight: the class-weights given as input to the constructor - - Examples:: - - >>> m = nn.LogSoftmax() - >>> loss = nn.NLLLoss() - >>> # input is of size nBatch x nClasses = 3 x 5 - >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True) - >>> # each element in target has to have 0 <= value < nclasses - >>> target = autograd.Variable(torch.LongTensor([1, 0, 4])) - >>> output = loss(m(input), target) - >>> output.backward() - """ - pass
    - - -
    [docs]class NLLLoss2d(_WeightedLoss): - r"""This is negative log likehood loss, but for image inputs. It computes NLL loss per-pixel. - - Args: - weight (Tensor, optional): a manual rescaling weight given to each class. - If given, has to be a 1D Tensor having as many elements, as there are classes. - size_average: By default, the losses are averaged over observations for each minibatch. - However, if the field sizeAverage is set to False, the losses - are instead summed for each minibatch. Default: True - - Shape: - - Input: :math:`(N, C, H, W)` where `C = number of classes` - - Target: :math:`(N, H, W)` where each value is `0 <= targets[i] <= C-1` - - Examples: - >>> m = nn.Conv2d(16, 32, (3, 3)).float() - >>> loss = nn.NLLLoss2d() - >>> # input is of size nBatch x nClasses x height x width - >>> input = autograd.Variable(torch.randn(3, 16, 10, 10)) - >>> # each element in target has to have 0 <= value < nclasses - >>> target = autograd.Variable(torch.LongTensor(3, 8, 8).random_(0, 4)) - >>> output = loss(m(input), target) - >>> output.backward() - """ - pass
    - - -
    [docs]class KLDivLoss(_WeightedLoss): - r"""The `Kullback-Leibler divergence`_ Loss - - KL divergence is a useful distance measure for continuous distributions - and is often useful when performing direct regression over the space of - (discretely sampled) continuous output distributions. - - As with `NLLLoss`, the `input` given is expected to contain - *log-probabilities*, however unlike `ClassNLLLoss`, `input` is not - restricted to a 2D Tensor, because the criterion is applied element-wise. - - This criterion expects a `target` `Tensor` of the same size as the - `input` `Tensor`. - - The loss can be described as: - - .. math:: loss(x, target) = 1/n \sum(target_i * (log(target_i) - x_i)) - - By default, the losses are averaged for each minibatch over observations - **as well as** over dimensions. However, if the field - `sizeAverage` is set to `False`, the losses are instead summed. - - .. _Kullback-Leibler divergence: - https://en.wikipedia.org/wiki/Kullback-Leibler_divergence - """ - pass
    - - -
    [docs]class MSELoss(_Loss): - r"""Creates a criterion that measures the mean squared error between - `n` elements in the input `x` and target `y`: - - :math:`{loss}(x, y) = 1/n \sum |x_i - y_i|^2` - - `x` and `y` arbitrary shapes with a total of `n` elements each. - - The sum operation still operates over all the elements, and divides by `n`. - - The division by `n` can be avoided if one sets the internal variable - `sizeAverage` to `False`. - - """ - pass
    - - -
    [docs]class BCELoss(_WeightedLoss): - r"""Creates a criterion that measures the Binary Cross Entropy - between the target and the output: - - .. math:: loss(o, t) = - 1/n \sum_i (t[i] * log(o[i]) + (1 - t[i]) * log(1 - o[i])) - - or in the case of the weights argument being specified: - - .. math:: loss(o, t) = - 1/n \sum_i weights[i] * (t[i] * log(o[i]) + (1 - t[i]) * log(1 - o[i])) - - This is used for measuring the error of a reconstruction in for example - an auto-encoder. Note that the targets `t[i]` should be numbers between 0 and 1. - - By default, the losses are averaged for each minibatch over observations - *as well as* over dimensions. However, if the field `sizeAverage` is set - to `False`, the losses are instead summed. - - """ - pass
    - - -
    [docs]class HingeEmbeddingLoss(_Loss): - r"""Measures the loss given an input `x` which is a 2D mini-batch tensor - and a labels `y`, a 1D tensor containg values (`1` or `-1`). - This is usually used for measuring whether two inputs are similar or dissimilar, - e.g. using the L1 pairwise distance, and is typically used for learning - nonlinear embeddings or semi-supervised learning:: - - { x_i, if y_i == 1 - loss(x, y) = 1/n { - { max(0, margin - x_i), if y_i == -1 - - `x` and `y` arbitrary shapes with a total of `n` elements each - the sum operation still operates over all the elements, and divides by `n`. - - The division by `n` can be avoided if one sets the internal variable `sizeAverage=False`. - - The `margin` has a default value of `1`, or can be set in the constructor. - """ - pass
    - - -
    [docs]class MultiLabelMarginLoss(_Loss): - r"""Creates a criterion that optimizes a multi-class multi-classification - hinge loss (margin-based loss) between input `x` (a 2D mini-batch `Tensor`) and - output `y` (which is a 2D `Tensor` of target class indices). - For each sample in the mini-batch:: - - loss(x, y) = sum_ij(max(0, 1 - (x[y[j]] - x[i]))) / x.size(0) - - where `i == 0` to `x.size(0)`, `j == 0` to `y.size(0)`, - `y[j] != 0`, and `i != y[j]` for all `i` and `j`. - - `y` and `x` must have the same size. - - The criterion only considers the first non zero `y[j]` targets. - - This allows for different samples to have variable amounts of target classes - """ - pass
    - - -
    [docs]class SmoothL1Loss(_Loss): - r"""Creates a criterion that uses a squared term if the absolute - element-wise error falls below 1 and an L1 term otherwise. - It is less sensitive to outliers than the `MSELoss` and in some cases - prevents exploding gradients (e.g. see "Fast R-CNN" paper by Ross Girshick). - Also known as the Huber loss:: - - { 0.5 * (x_i - y_i)^2, if |x_i - y_i| < 1 - loss(x, y) = 1/n \sum { - { |x_i - y_i| - 0.5, otherwise - - `x` and `y` arbitrary shapes with a total of `n` elements each - the sum operation still operates over all the elements, and divides by `n`. - - The division by `n` can be avoided if one sets the internal variable - `sizeAverage` to `False` - """ - pass
    - - -
    [docs]class SoftMarginLoss(_Loss): - r"""Creates a criterion that optimizes a two-class classification - logistic loss between input `x` (a 2D mini-batch Tensor) and - target `y` (which is a tensor containing either `1` or `-1`). - - :: - - loss(x, y) = sum_i (log(1 + exp(-y[i]*x[i]))) / x.nelement() - - The normalization by the number of elements in the input can be disabled by - setting `self.sizeAverage` to `False`. - """ - pass
    - - -
    [docs]class CrossEntropyLoss(_WeightedLoss): - r"""This criterion combines `LogSoftMax` and `NLLLoss` in one single class. - - It is useful when training a classification problem with `n` classes. - If provided, the optional argument `weights` should be a 1D `Tensor` - assigning weight to each of the classes. - This is particularly useful when you have an unbalanced training set. - - The `input` is expected to contain scores for each class. - - `input` has to be a 2D `Tensor` of size `batch x n`. - - This criterion expects a class index (0 to nClasses-1) as the - `target` for each value of a 1D tensor of size `n` - - The loss can be described as:: - - loss(x, class) = -log(exp(x[class]) / (\sum_j exp(x[j]))) - = -x[class] + log(\sum_j exp(x[j])) - - or in the case of the `weights` argument being specified:: - - loss(x, class) = weights[class] * (-x[class] + log(\sum_j exp(x[j]))) - - The losses are averaged across observations for each minibatch. - - Shape: - - Input: :math:`(N, C)` where `C = number of classes` - - Target: :math:`(N)` where each value is `0 <= targets[i] <= C-1` - - """ - - def forward(self, input, target): - _assert_no_grad(target) - return F.cross_entropy(input, target, - self.weight, self.size_average)
    - - -
    [docs]class MultiLabelSoftMarginLoss(_WeightedLoss): - r"""Creates a criterion that optimizes a multi-label one-versus-all - loss based on max-entropy, between input `x` (a 2D mini-batch `Tensor`) and - target `y` (a binary 2D `Tensor`). For each sample in the minibatch:: - - loss(x, y) = - sum_i (y[i] log( exp(x[i]) / (1 + exp(x[i]))) - + (1-y[i]) log(1/(1+exp(x[i])))) / x:nElement() - - where `i == 0` to `x.nElement()-1`, `y[i] in {0,1}`. - `y` and `x` must have the same size. - """ - - def forward(self, input, target): - return F.binary_cross_entropy(torch.sigmoid(input), target, - self.weight, self.size_average)
    - - -
    [docs]class CosineEmbeddingLoss(Module): - r"""Creates a criterion that measures the loss given an input tensors x1, x2 - and a `Tensor` label `y` with values 1 or -1. - This is used for measuring whether two inputs are similar or dissimilar, - using the cosine distance, and is typically used for learning nonlinear - embeddings or semi-supervised learning. - - `margin` should be a number from `-1` to `1`, `0` to `0.5` is suggested. - If `margin` is missing, the default value is `0`. - - The loss function for each sample is:: - - { 1 - cos(x1, x2), if y == 1 - loss(x, y) = { - { max(0, cos(x1, x2) - margin), if y == -1 - - If the internal variable `sizeAverage` is equal to `True`, - the loss function averages the loss over the batch samples; - if `sizeAverage` is `False`, then the loss function sums over the - batch samples. By default, `sizeAverage = True`. - """ - - def __init__(self, margin=0, size_average=True): - super(CosineEmbeddingLoss, self).__init__() - self.margin = margin - self.size_average = size_average - - def forward(self, input1, input2, target): - return self._backend.CosineEmbeddingLoss(self.margin, - self.size_average)(input1, input2, target)
    - - -
    [docs]class MarginRankingLoss(Module): - r"""Creates a criterion that measures the loss given - inputs `x1`, `x2`, two 1D min-batch `Tensor`s, - and a label 1D mini-batch tensor `y` with values (`1` or `-1`). - - If `y == 1` then it assumed the first input should be ranked higher - (have a larger value) than the second input, and vice-versa for `y == -1`. - - The loss function for each sample in the mini-batch is:: - - loss(x, y) = max(0, -y * (x1 - x2) + margin) - - if the internal variable `sizeAverage = True`, - the loss function averages the loss over the batch samples; - if `sizeAverage = False`, then the loss function sums over the batch samples. - By default, `sizeAverage` equals to `True`. - """ - - def __init__(self, margin=0, size_average=True): - super(MarginRankingLoss, self).__init__() - self.margin = margin - self.size_average = size_average - - def forward(self, input1, input2, target): - return self._backend.MarginRankingLoss(self.margin, - self.size_average)(input1, input2, target)
    - - -
    [docs]class MultiMarginLoss(Module): - r"""Creates a criterion that optimizes a multi-class classification hinge loss - (margin-based loss) between input `x` (a 2D mini-batch `Tensor`) and - output `y` (which is a 1D tensor of target class indices, `0` <= `y` <= `x.size(1)`): - - For each mini-batch sample:: - - loss(x, y) = sum_i(max(0, (margin - x[y] + x[i]))^p) / x.size(0) - where `i == 0` to `x.size(0)` and `i != y`. - - Optionally, you can give non-equal weighting on the classes by passing - a 1D `weights` tensor into the constructor. - - The loss function then becomes: - - loss(x, y) = sum_i(max(0, w[y] * (margin - x[y] - x[i]))^p) / x.size(0) - - By default, the losses are averaged over observations for each minibatch. - However, if the field `sizeAverage` is set to `False`, - the losses are instead summed. - """ - - def __init__(self, p=1, margin=1, weight=None, size_average=True): - super(MultiMarginLoss, self).__init__() - if p != 1 and p != 2: - raise ValueError("only p == 1 and p == 2 supported") - assert weight is None or weight.dim() == 1 - self.p = p - self.margin = margin - self.size_average = size_average - self.weight = weight - - def forward(self, input, target): - return self._backend.MultiMarginLoss(self.size_average, self.p, - self.margin, weight=self.weight)(input, target)
    - - -# TODO: L1HingeEmbeddingCriterion -# TODO: MSECriterion weight -# TODO: ClassSimplexCriterion -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/modules/module.html b/docs/_modules/torch/nn/modules/module.html deleted file mode 100644 index 69745a77460e..000000000000 --- a/docs/_modules/torch/nn/modules/module.html +++ /dev/null @@ -1,965 +0,0 @@ - - - - - - - - - - - torch.nn.modules.module — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.modules.module

    -from itertools import chain
    -from collections import OrderedDict
    -import functools
    -
    -import torch
    -from ..backends.thnn import backend as thnn_backend
    -from ..parameter import Parameter
    -from torch.autograd import Variable
    -import torch.utils.hooks as hooks
    -
    -
    -def _addindent(s_, numSpaces):
    -    s = s_.split('\n')
    -    # dont do anything for single-line stuff
    -    if len(s) == 1:
    -        return s_
    -    first = s.pop(0)
    -    s = [(numSpaces * ' ') + line for line in s]
    -    s = '\n'.join(s)
    -    s = first + '\n' + s
    -    return s
    -
    -
    -
    [docs]class Module(object): - """Base class for all neural network modules. - - Your models should also subclass this class. - - Modules can also contain other Modules, allowing to nest them in - a tree structure. You can assign the submodules as regular attributes:: - - import torch.nn as nn - import torch.nn.functional as F - - class Model(nn.Module): - def __init__(self): - super(Model, self).__init__() - self.conv1 = nn.Conv2d(1, 20, 5) - self.conv2 = nn.Conv2d(20, 20, 5) - - def forward(self, x): - x = F.relu(self.conv1(x)) - return F.relu(self.conv2(x)) - - Submodules assigned in this way will be registered, and will have their - parameters converted too when you call .cuda(), etc. - """ - - dump_patches = False - - def __init__(self): - self._backend = thnn_backend - self._parameters = OrderedDict() - self._buffers = OrderedDict() - self._backward_hooks = OrderedDict() - self._forward_hooks = OrderedDict() - self._modules = OrderedDict() - self.training = True - -
    [docs] def forward(self, *input): - """Defines the computation performed at every call. - - Should be overriden by all subclasses. - """ - raise NotImplementedError
    - -
    [docs] def register_buffer(self, name, tensor): - """Adds a persistent buffer to the module. - - This is typically used to register a buffer that should not to be - considered a model parameter. For example, BatchNorm's ``running_mean`` - is not a parameter, but is part of the persistent state. - - Buffers can be accessed as attributes using given names. - - Example: - >>> self.register_buffer('running_mean', torch.zeros(num_features)) - """ - self._buffers[name] = tensor
    - -
    [docs] def register_parameter(self, name, param): - """Adds a parameter to the module. - - The parameter can be accessed as an attribute using given name. - """ - if '_parameters' not in self.__dict__: - raise AttributeError( - "cannot assign parameter before Module.__init__() call") - if param is None: - self._parameters[name] = None - elif not isinstance(param, Parameter): - raise TypeError("cannot assign '{}' object to parameter '{}' " - "(torch.nn.Parameter or None required)" - .format(torch.typename(param), name)) - elif param.creator: - raise ValueError( - "Cannot assign non-leaf Variable to parameter '{0}'. Model " - "parameters must be created explicitly. To express '{0}' " - "as a function of another variable, compute the value in " - "the forward() method.".format(name)) - else: - self._parameters[name] = param
    - - def add_module(self, name, module): - if hasattr(self, name): - raise KeyError("attribute already exists '{}'".format(name)) - if not isinstance(module, Module) and module is not None: - raise TypeError("{} is not a Module subclass".format( - torch.typename(module))) - self._modules[name] = module - - def _apply(self, fn): - for module in self.children(): - module._apply(fn) - - for param in self._parameters.values(): - if param is not None: - # Variables stored in modules are graph leaves, and we don't - # want to create copy nodes, so we have to unpack the data. - param.data = fn(param.data) - if param._grad is not None: - param._grad.data = fn(param._grad.data) - - for key, buf in self._buffers.items(): - if buf is not None: - self._buffers[key] = fn(buf) - - return self - - def apply(self, fn): - for module in self.children(): - module.apply(fn) - fn(self) - return self - -
    [docs] def cuda(self, device_id=None): - """Moves all model parameters and buffers to the GPU. - - Arguments: - device_id (int, optional): if specified, all parameters will be - copied to that device - """ - return self._apply(lambda t: t.cuda(device_id))
    - -
    [docs] def cpu(self, device_id=None): - """Moves all model parameters and buffers to the CPU.""" - return self._apply(lambda t: t.cpu())
    - - def type(self, dst_type): - return self._apply(lambda t: t.type(dst_type)) - -
    [docs] def float(self): - """Casts all parameters and buffers to float datatype.""" - return self._apply(lambda t: t.float())
    - -
    [docs] def double(self): - """Casts all parameters and buffers to double datatype.""" - return self._apply(lambda t: t.double())
    - -
    [docs] def half(self): - """Casts all parameters and buffers to half datatype.""" - return self._apply(lambda t: t.half())
    - -
    [docs] def register_backward_hook(self, hook): - """Registers a backward hook on the module. - - The hook will be called every time the gradients with respect to module - inputs are computed. The hook should have the following signature:: - - hook(module, grad_input, grad_output) -> Tensor or None - - The :attr:`grad_input` and :attr:`grad_output` may be tuples if the - module has multiple inputs or outputs. The hook should not modify its - arguments, but it can optionally return a new gradient with respect to - input that will be used in place of :attr:`grad_input` in subsequent - computations. - - This function returns a handle with a method ``handle.remove()`` - that removes the hook from the module. - """ - handle = hooks.RemovableHandle(self._backward_hooks) - self._backward_hooks[handle.id] = hook - return handle
    - -
    [docs] def register_forward_hook(self, hook): - """Registers a forward hook on the module. - - The hook will be called every time :func:`forward` computes an output. - It should have the following signature:: - - hook(module, input, output) -> None - - The hook should not modify the input or output. - This function returns a handle with a method ``handle.remove()`` - that removes the hook from the module. - """ - handle = hooks.RemovableHandle(self._forward_hooks) - self._forward_hooks[handle.id] = hook - return handle
    - - def __call__(self, *input, **kwargs): - result = self.forward(*input, **kwargs) - for hook in self._forward_hooks.values(): - hook_result = hook(self, input, result) - if hook_result is not None: - raise RuntimeError( - "forward hooks should never return any values, but '{}'" - "didn't return None".format(hook)) - var = result - while not isinstance(var, Variable): - var = var[0] - creator = var.creator - if creator is not None and len(self._backward_hooks) > 0: - for hook in self._backward_hooks.values(): - wrapper = functools.partial(hook, self) - functools.update_wrapper(wrapper, hook) - creator.register_hook(wrapper) - return result - - def __getattr__(self, name): - if '_parameters' in self.__dict__: - _parameters = self.__dict__['_parameters'] - if name in _parameters: - return _parameters[name] - if '_buffers' in self.__dict__: - _buffers = self.__dict__['_buffers'] - if name in _buffers: - return _buffers[name] - if '_modules' in self.__dict__: - modules = self.__dict__['_modules'] - if name in modules: - return modules[name] - return object.__getattr__(self, name) - - def __setattr__(self, name, value): - def remove_from(*dicts): - for d in dicts: - if name in d: - del d[name] - - params = self.__dict__.get('_parameters') - if isinstance(value, Parameter): - if params is None: - raise AttributeError( - "cannot assign parameters before Module.__init__() call") - remove_from(self.__dict__, self._buffers, self._modules) - self.register_parameter(name, value) - elif params is not None and name in params: - if value is not None: - raise TypeError("cannot assign '{}' as parameter '{}' " - "(torch.nn.Parameter or None expected)" - .format(torch.typename(value), name)) - self.register_parameter(name, value) - else: - modules = self.__dict__.get('_modules') - if isinstance(value, Module): - if modules is None: - raise AttributeError( - "cannot assign module before Module.__init__() call") - remove_from(self.__dict__, self._parameters, self._buffers) - 
modules[name] = value - elif modules is not None and name in modules: - if value is not None: - raise TypeError("cannot assign '{}' as child module '{}' " - "(torch.nn.Module or None expected)" - .format(torch.typename(value), name)) - modules[name] = value - else: - buffers = self.__dict__.get('_buffers') - if buffers is not None and name in buffers: - if value is not None and not torch.is_tensor(value): - raise TypeError("cannot assign '{}' as buffer '{}' " - "(torch.Tensor or None expected)" - .format(torch.typename(value), name)) - buffers[name] = value - else: - object.__setattr__(self, name, value) - - def __delattr__(self, name): - if name in self._parameters: - del self._parameters[name] - elif name in self._buffers: - del self._buffers[name] - elif name in self._modules: - del self._modules[name] - else: - object.__delattr__(self, name) - -
    [docs] def state_dict(self, destination=None, prefix=''): - """Returns a dictionary containing a whole state of the module. - - Both parameters and persistent buffers (e.g. running averages) are - included. Keys are corresponding parameter and buffer names. - - Example: - >>> module.state_dict().keys() - ['bias', 'weight'] - """ - if destination is None: - destination = OrderedDict() - for name, param in self._parameters.items(): - if param is not None: - destination[prefix + name] = param.data - for name, buf in self._buffers.items(): - if buf is not None: - destination[prefix + name] = buf - for name, module in self._modules.items(): - if module is not None: - module.state_dict(destination, prefix + name + '.') - return destination
    - -
    [docs] def load_state_dict(self, state_dict): - """Copies parameters and buffers from :attr:`state_dict` into - this module and its descendants. The keys of :attr:`state_dict` must - exactly match the keys returned by this module's :func:`state_dict()` - function. - - Arguments: - state_dict (dict): A dict containing parameters and - persistent buffers. - """ - own_state = self.state_dict() - for name, param in state_dict.items(): - if name not in own_state: - raise KeyError('unexpected key "{}" in state_dict' - .format(name)) - if isinstance(param, Parameter): - # backwards compatibility for serialized parameters - param = param.data - own_state[name].copy_(param) - - missing = set(own_state.keys()) - set(state_dict.keys()) - if len(missing) > 0: - raise KeyError('missing keys in state_dict: "{}"'.format(missing))
    - -
    [docs] def parameters(self, memo=None): - """Returns an iterator over module parameters. - - This is typically passed to an optimizer. - - Example: - >>> for param in model.parameters(): - >>> print(type(param.data), param.size()) - <class 'torch.FloatTensor'> (20L,) - <class 'torch.FloatTensor'> (20L, 1L, 5L, 5L) - """ - if memo is None: - memo = set() - for p in self._parameters.values(): - if p is not None and p not in memo: - memo.add(p) - yield p - for module in self.children(): - for p in module.parameters(memo): - yield p
    - -
    [docs] def children(self): - """Returns an iterator over children modules.""" - memo = set() - for module in self._modules.values(): - if module is not None and module not in memo: - memo.add(module) - yield module
    - - def modules(self, memo=None): - if memo is None: - memo = set() - if self not in memo: - memo.add(self) - yield self - for module in self.children(): - for m in module.modules(memo): - yield m - -
    [docs] def train(self, mode=True): - """Sets the module in training mode. - - This has any effect only on modules such as Dropout or BatchNorm. - """ - self.training = mode - for module in self.children(): - module.train(mode) - return self
    - -
    [docs] def eval(self): - """Sets the module in evaluation mode. - - This has any effect only on modules such as Dropout or BatchNorm. - """ - return self.train(False)
    - -
    [docs] def zero_grad(self): - """Sets gradients of all model parameters to zero.""" - for p in self.parameters(): - if p.grad is not None: - p.grad.data.zero_()
    - - def share_memory(self): - return self._apply(lambda t: t.share_memory_()) - - def __repr__(self): - tmpstr = self.__class__.__name__ + ' (\n' - for key, module in self._modules.items(): - modstr = module.__repr__() - modstr = _addindent(modstr, 2) - tmpstr = tmpstr + ' (' + key + '): ' + modstr + '\n' - tmpstr = tmpstr + ')' - return tmpstr
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/modules/pixelshuffle.html b/docs/_modules/torch/nn/modules/pixelshuffle.html deleted file mode 100644 index cc0444f0c320..000000000000 --- a/docs/_modules/torch/nn/modules/pixelshuffle.html +++ /dev/null @@ -1,600 +0,0 @@ - - - - - - - - - - - torch.nn.modules.pixelshuffle — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.modules.pixelshuffle

    -from .module import Module
    -from .. import functional as F
    -
    -
    -
    [docs]class PixelShuffle(Module): - r"""Rearranges elements in a Tensor of shape :math:`(*, C * r^2, H, W]` to a - tensor of shape :math:`(C, H * r, W * r)`. - - This is useful for implementing efficient sub-pixel convolution - with a stride of :math:`1/r`. - - Look at the paper: - `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_ - by Shi et. al (2016) for more details - - Args: - upscale_factor (int): factor to increase spatial resolution by - - Shape: - - Input: :math:`(N, C * {upscale\_factor}^2, H, W)` - - Output: :math:`(N, C, H * {upscale\_factor}, W * {upscale\_factor})` - - Examples:: - - >>> ps = nn.PixelShuffle(3) - >>> input = autograd.Variable(torch.Tensor(1, 9, 4, 4)) - >>> output = ps(input) - >>> print(output.size()) - torch.Size([1, 1, 12, 12]) - - .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network: - https://arxiv.org/abs/1609.05158 - """ - - def __init__(self, upscale_factor): - super(PixelShuffle, self).__init__() - self.upscale_factor = upscale_factor - - def forward(self, input): - return F.pixel_shuffle(input, self.upscale_factor) - - def __repr__(self): - return self.__class__.__name__ + ' (upscale_factor=' + str(self.upscale_factor) + ')'
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/modules/pooling.html b/docs/_modules/torch/nn/modules/pooling.html deleted file mode 100644 index 1e933d34fe3a..000000000000 --- a/docs/_modules/torch/nn/modules/pooling.html +++ /dev/null @@ -1,1273 +0,0 @@ - - - - - - - - - - - torch.nn.modules.pooling — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.modules.pooling

    -import torch
    -from torch.autograd import Variable
    -
    -from .module import Module
    -from .utils import _single, _pair, _triple
    -from .. import functional as F
    -
    -
    -
    [docs]class MaxPool1d(Module): - r"""Applies a 1D max pooling over an input signal composed of several input - planes. - - In the simplest case, the output value of the layer with input size :math:`(N, C, L)` - and output :math:`(N, C, L_{out})` can be precisely described as: - - .. math:: - - \begin{array}{ll} - out(N_i, C_j, k) = \max_{{m}=0}^{{kernel\_size}-1} input(N_i, C_j, stride * k + m) - \end{array} - - | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides - for :attr:`padding` number of points - | :attr:`dilation` controls the spacing between the kernel points. It is harder to describe, - but this `link`_ has a nice visualization of what :attr:`dilation` does. - - Args: - kernel_size: the size of the window to take a max over - stride: the stride of the window. Default value is :attr:`kernel_size` - padding: implicit zero padding to be added on both sides - dilation: a parameter that controls the stride of elements in the window - return_indices: if True, will return the max indices along with the outputs. - Useful when Unpooling later - ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape - - Shape: - - Input: :math:`(N, C, L_{in})` - - Output: :math:`(N, C, L_{out})` where - :math:`L_{out} = floor((L_{in} + 2 * padding - dilation * (kernel\_size - 1) - 1) / stride + 1)` - - Examples:: - - >>> # pool of size=3, stride=2 - >>> m = nn.MaxPool1d(3, stride=2) - >>> input = autograd.Variable(torch.randn(20, 16, 50)) - >>> output = m(input) - - .. 
_link: - https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md - """ - - def __init__(self, kernel_size, stride=None, padding=0, dilation=1, - return_indices=False, ceil_mode=False): - super(MaxPool1d, self).__init__() - self.kernel_size = kernel_size - self.stride = stride or kernel_size - self.padding = padding - self.dilation = dilation - self.return_indices = return_indices - self.ceil_mode = ceil_mode - - def forward(self, input): - return F.max_pool1d(input, self.kernel_size, self.stride, - self.padding, self.dilation, self.ceil_mode, - self.return_indices) - - def __repr__(self): - return self.__class__.__name__ + ' (' \ - + 'size=' + str(self.kernel_size) \ - + ', stride=' + str(self.stride) \ - + ', padding=' + str(self.padding) \ - + ', dilation=' + str(self.dilation) \ - + ', ceil_mode=' + str(self.ceil_mode) + ')'
    - - -
    [docs]class MaxPool2d(Module): - r"""Applies a 2D max pooling over an input signal composed of several input - planes. - - In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`, - output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)` - can be precisely described as: - - .. math:: - - \begin{array}{ll} - out(N_i, C_j, h, w) = \max_{{m}=0}^{kH-1} \max_{{n}=0}^{kW-1} - input(N_i, C_j, stride[0] * h + m, stride[1] * w + n) - \end{array} - - | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides - for :attr:`padding` number of points - | :attr:`dilation` controls the spacing between the kernel points. It is harder to describe, - but this `link`_ has a nice visualization of what :attr:`dilation` does. - - The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be: - - - a single ``int`` -- in which case the same value is used for the height and width dimension - - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension, - and the second `int` for the width dimension - - Args: - kernel_size: the size of the window to take a max over - stride: the stride of the window. Default value is :attr:`kernel_size` - padding: implicit zero padding to be added on both sides - dilation: a parameter that controls the stride of elements in the window - return_indices: if True, will return the max indices along with the outputs. 
- Useful when Unpooling later - ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape - - Shape: - - Input: :math:`(N, C, H_{in}, W_{in})` - - Output: :math:`(N, C, H_{out}, W_{out})` where - :math:`H_{out} = floor((H_{in} + 2 * padding[0] - dilation[0] * (kernel\_size[0] - 1) - 1) / stride[0] + 1)` - :math:`W_{out} = floor((W_{in} + 2 * padding[1] - dilation[1] * (kernel\_size[1] - 1) - 1) / stride[1] + 1)` - - Examples:: - - >>> # pool of square window of size=3, stride=2 - >>> m = nn.MaxPool2d(3, stride=2) - >>> # pool of non-square window - >>> m = nn.MaxPool2d((3, 2), stride=(2, 1)) - >>> input = autograd.Variable(torch.randn(20, 16, 50, 32)) - >>> output = m(input) - - .. _link: - https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md - """ - - def __init__(self, kernel_size, stride=None, padding=0, dilation=1, - return_indices=False, ceil_mode=False): - super(MaxPool2d, self).__init__() - self.kernel_size = kernel_size - self.stride = stride or kernel_size - self.padding = padding - self.dilation = dilation - self.return_indices = return_indices - self.ceil_mode = ceil_mode - - def forward(self, input): - return F.max_pool2d(input, self.kernel_size, self.stride, - self.padding, self.dilation, self.ceil_mode, - self.return_indices) - - def __repr__(self): - kh, kw = _pair(self.kernel_size) - dh, dw = _pair(self.stride) - padh, padw = _pair(self.padding) - dilh, dilw = _pair(self.dilation) - padding_str = ', padding=(' + str(padh) + ', ' + str(padw) + ')' \ - if padh != 0 and padw != 0 else '' - dilation_str = (', dilation=(' + str(dilh) + ', ' + str(dilw) + ')' - if dilh != 0 and dilw != 0 else '') - return self.__class__.__name__ + ' (' \ - + 'size=(' + str(kh) + ', ' + str(kw) + ')' \ - + ', stride=(' + str(dh) + ', ' + str(dw) + ')' \ - + padding_str + dilation_str + ')'
    - - -
    [docs]class MaxUnpool1d(Module): - r"""Computes a partial inverse of :class:`MaxPool1d`. - - :class:`MaxPool1d` is not fully invertible, since the non-maximal values are lost. - - :class:`MaxUnpool1d` takes in as input the output of :class:`MaxPool1d` - including the indices of the maximal values and computes a partial inverse - in which all non-maximal values are set to zero. - - .. note:: `MaxPool1d` can map several input sizes to the same output sizes. - Hence, the inversion process can get ambiguous. - To accommodate this, you can provide the needed output size - as an additional argument `output_size` in the forward call. - See the Inputs and Example below. - - Args: - kernel_size (int or tuple): Size of the max pooling window. - stride (int or tuple): Stride of the max pooling window. - It is set to ``kernel_size`` by default. - padding (int or tuple): Padding that was added to the input - - Inputs: - - `input`: the input Tensor to invert - - `indices`: the indices given out by `MaxPool1d` - - `output_size` (optional) : a `torch.Size` that specifies the targeted output size - - Shape: - - Input: :math:`(N, C, H_{in})` - - Output: :math:`(N, C, H_{out})` where - :math:`H_{out} = (H_{in} - 1) * stride[0] - 2 * padding[0] + kernel\_size[0]` - or as given by :attr:`output_size` in the call operator - - Example:: - - >>> pool = nn.MaxPool1d(2, stride=2, return_indices=True) - >>> unpool = nn.MaxUnpool1d(2, stride=2) - >>> input = Variable(torch.Tensor([[[1, 2, 3, 4, 5, 6, 7, 8]]])) - >>> output, indices = pool(input) - >>> unpool(output, indices) - Variable containing: - (0 ,.,.) = - 0 2 0 4 0 6 0 8 - [torch.FloatTensor of size 1x1x8] - - >>> # Example showcasing the use of output_size - >>> input = Variable(torch.Tensor([[[1, 2, 3, 4, 5, 6, 7, 8, 9]]])) - >>> output, indices = pool(input) - >>> unpool(output, indices, output_size=input.size()) - Variable containing: - (0 ,.,.) 
= - 0 2 0 4 0 6 0 8 0 - [torch.FloatTensor of size 1x1x9] - - >>> unpool(output, indices) - Variable containing: - (0 ,.,.) = - 0 2 0 4 0 6 0 8 - [torch.FloatTensor of size 1x1x8] - - """ - - def __init__(self, kernel_size, stride=None, padding=0): - super(MaxUnpool1d, self).__init__() - self.kernel_size = _single(kernel_size) - self.stride = _single(stride if stride is not None else kernel_size) - self.padding = _single(padding) - - def forward(self, input, indices, output_size=None): - return F.max_unpool1d(input, indices, self.kernel_size, self.stride, - self.padding, output_size)
    - - -
    [docs]class MaxUnpool2d(Module): - r"""Computes a partial inverse of :class:`MaxPool2d`. - - :class:`MaxPool2d` is not fully invertible, since the non-maximal values are lost. - - :class:`MaxUnpool2d` takes in as input the output of :class:`MaxPool2d` - including the indices of the maximal values and computes a partial inverse - in which all non-maximal values are set to zero. - - .. note:: `MaxPool2d` can map several input sizes to the same output sizes. - Hence, the inversion process can get ambiguous. - To accommodate this, you can provide the needed output size - as an additional argument `output_size` in the forward call. - See the Inputs and Example below. - - Args: - kernel_size (int or tuple): Size of the max pooling window. - stride (int or tuple): Stride of the max pooling window. - It is set to ``kernel_size`` by default. - padding (int or tuple): Padding that was added to the input - - Inputs: - - `input`: the input Tensor to invert - - `indices`: the indices given out by `MaxPool2d` - - `output_size` (optional) : a `torch.Size` that specifies the targeted output size - - Shape: - - Input: :math:`(N, C, H_{in}, W_{in})` - - Output: :math:`(N, C, H_{out}, W_{out})` where - :math:`H_{out} = (H_{in} - 1) * stride[0] -2 * padding[0] + kernel\_size[0]` - :math:`W_{out} = (W_{in} - 1) * stride[1] -2 * padding[1] + kernel\_size[1]` - or as given by :attr:`output_size` in the call operator - - Example:: - - >>> pool = nn.MaxPool2d(2, stride=2, return_indices=True) - >>> unpool = nn.MaxUnpool2d(2, stride=2) - >>> input = Variable(torch.Tensor([[[[ 1, 2, 3, 4], - ... [ 5, 6, 7, 8], - ... [ 9, 10, 11, 12], - ... [13, 14, 15, 16]]]])) - >>> output, indices = pool(input) - >>> unpool(output, indices) - Variable containing: - (0 ,0 ,.,.) 
= - 0 0 0 0 - 0 6 0 8 - 0 0 0 0 - 0 14 0 16 - [torch.FloatTensor of size 1x1x4x4] - - >>> # specify a different output size than input size - >>> unpool(output, indices, output_size=torch.Size([1, 1, 5, 5])) - Variable containing: - (0 ,0 ,.,.) = - 0 0 0 0 0 - 6 0 8 0 0 - 0 0 0 14 0 - 16 0 0 0 0 - 0 0 0 0 0 - [torch.FloatTensor of size 1x1x5x5] - - """ - - def __init__(self, kernel_size, stride=None, padding=0): - super(MaxUnpool2d, self).__init__() - self.kernel_size = _pair(kernel_size) - self.stride = _pair(stride if stride is not None else kernel_size) - self.padding = _pair(padding) - - def forward(self, input, indices, output_size=None): - return F.max_unpool2d(input, indices, self.kernel_size, self.stride, - self.padding, output_size)
    - - -
    [docs]class MaxUnpool3d(Module): - r"""Computes a partial inverse of :class:`MaxPool3d`. - - :class:`MaxPool3d` is not fully invertible, since the non-maximal values are lost. - :class:`MaxUnpool3d` takes in as input the output of :class:`MaxPool3d` - including the indices of the maximal values and computes a partial inverse - in which all non-maximal values are set to zero. - - .. note:: `MaxPool3d` can map several input sizes to the same output sizes. - Hence, the inversion process can get ambiguous. - To accommodate this, you can provide the needed output size - as an additional argument `output_size` in the forward call. - See the Inputs section below. - - Args: - kernel_size (int or tuple): Size of the max pooling window. - stride (int or tuple): Stride of the max pooling window. - It is set to ``kernel_size`` by default. - padding (int or tuple): Padding that was added to the input - - Inputs: - - `input`: the input Tensor to invert - - `indices`: the indices given out by `MaxPool3d` - - `output_size` (optional) : a `torch.Size` that specifies the targeted output size - - Shape: - - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` - - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` where - :math:`D_{out} = (D_{in} - 1) * stride[0] - 2 * padding[0] + kernel\_size[0]` - :math:`H_{out} = (H_{in} - 1) * stride[1] - 2 * padding[1] + kernel\_size[1]` - :math:`W_{out} = (W_{in} - 1) * stride[2] - 2 * padding[2] + kernel\_size[2]` - or as given by :attr:`output_size` in the call operator - - Example:: - - >>> # pool of square window of size=3, stride=2 - >>> pool = nn.MaxPool3d(3, stride=2, return_indices=True) - >>> unpool = nn.MaxUnpool3d(3, stride=2) - >>> output, indices = pool(Variable(torch.randn(20, 16, 51, 33, 15))) - >>> unpooled_output = unpool(output, indices) - >>> unpooled_output.size() - torch.Size([20, 16, 51, 33, 15]) - """ - - def __init__(self, kernel_size, stride=None, padding=0): - super(MaxUnpool3d, self).__init__() - self.kernel_size = 
_triple(kernel_size) - self.stride = _triple(stride if stride is not None else kernel_size) - self.padding = _triple(padding) - - def forward(self, input, indices, output_size=None): - return F.max_unpool3d(input, indices, self.kernel_size, self.stride, - self.padding, output_size)
    - - -
    [docs]class AvgPool1d(Module): - r"""Applies a 1D average pooling over an input signal composed of several - input planes. - - In the simplest case, the output value of the layer with input size :math:`(N, C, L)`, - output :math:`(N, C, L_{out})` and :attr:`kernel_size` :math:`k` - can be precisely described as: - - .. math:: - - \begin{array}{ll} - out(N_i, C_j, l) = 1 / k * \sum_{{m}=0}^{k} - input(N_i, C_j, stride * l + m) - \end{array} - - | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides - for :attr:`padding` number of points - - The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can each be - an ``int`` or a one-element tuple. - - Args: - kernel_size: the size of the window - stride: the stride of the window. Default value is :attr:`kernel_size` - padding: implicit zero padding to be added on both sides - ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape - count_include_pad: when True, will include the zero-padding in the averaging calculation - - Shape: - - Input: :math:`(N, C, L_{in})` - - Output: :math:`(N, C, L_{out})` where - :math:`L_{out} = floor((L_{in} + 2 * padding - kernel\_size) / stride + 1)` - - Examples:: - - >>> # pool with window of size=3, stride=2 - >>> m = nn.AvgPool1d(3, stride=2) - >>> m(Variable(torch.Tensor([[[1,2,3,4,5,6,7]]]))) - Variable containing: - (0 ,.,.) = - 2 4 6 - [torch.FloatTensor of size 1x1x3] - """ - - def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False, - count_include_pad=True): - super(AvgPool1d, self).__init__() - self.kernel_size = _single(kernel_size) - self.stride = _single(stride if stride is not None else kernel_size) - self.padding = _single(padding) - self.ceil_mode = ceil_mode - self.count_include_pad = count_include_pad - - def forward(self, input): - return F.avg_pool1d( - input, self.kernel_size, self.stride, self.padding, self.ceil_mode, - self.count_include_pad)
    - - -
    [docs]class AvgPool2d(Module): - r"""Applies a 2D average pooling over an input signal composed of several input - planes. - - In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`, - output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)` - can be precisely described as: - - .. math:: - - \begin{array}{ll} - out(N_i, C_j, h, w) = 1 / (kH * kW) * \sum_{{m}=0}^{kH-1} \sum_{{n}=0}^{kW-1} - input(N_i, C_j, stride[0] * h + m, stride[1] * w + n) - \end{array} - - | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides - for :attr:`padding` number of points - - The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can either be: - - - a single ``int`` -- in which case the same value is used for the height and width dimension - - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension, - and the second `int` for the width dimension - - Args: - kernel_size: the size of the window - stride: the stride of the window. 
Default value is :attr:`kernel_size` - padding: implicit zero padding to be added on both sides - ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape - count_include_pad: when True, will include the zero-padding in the averaging calculation - - Shape: - - Input: :math:`(N, C, H_{in}, W_{in})` - - Output: :math:`(N, C, H_{out}, W_{out})` where - :math:`H_{out} = floor((H_{in} + 2 * padding[0] - kernel\_size[0]) / stride[0] + 1)` - :math:`W_{out} = floor((W_{in} + 2 * padding[1] - kernel\_size[1]) / stride[1] + 1)` - - Examples:: - - >>> # pool of square window of size=3, stride=2 - >>> m = nn.AvgPool2d(3, stride=2) - >>> # pool of non-square window - >>> m = nn.AvgPool2d((3, 2), stride=(2, 1)) - >>> input = autograd.Variable(torch.randn(20, 16, 50, 32)) - >>> output = m(input) - """ - - def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False, - count_include_pad=True): - super(AvgPool2d, self).__init__() - self.kernel_size = kernel_size - self.stride = stride or kernel_size - self.padding = padding - self.ceil_mode = ceil_mode - self.count_include_pad = count_include_pad - - def forward(self, input): - return F.avg_pool2d(input, self.kernel_size, self.stride, - self.padding, self.ceil_mode, self.count_include_pad)
    - - -
    [docs]class MaxPool3d(Module): - r"""Applies a 3D max pooling over an input signal composed of several input - planes. - - In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`, - output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)` - can be precisely described as: - - .. math:: - - \begin{array}{ll} - out(N_i, C_j, d, h, w) = \max_{{k}=0}^{kD-1} \max_{{m}=0}^{kH-1} \max_{{n}=0}^{kW-1} - input(N_i, C_j, stride[0] * k + d, stride[1] * h + m, stride[2] * w + n) - \end{array} - - | If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides - for :attr:`padding` number of points - | :attr:`dilation` controls the spacing between the kernel points. It is harder to describe, - but this `link`_ has a nice visualization of what :attr:`dilation` does. - - The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be: - - - a single ``int`` -- in which case the same value is used for the height and width dimension - - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension, - the second `int` for the width dimension and the third `int` for the width dimension - - Args: - kernel_size: the size of the window to take a max over - stride: the stride of the window. Default value is :attr:`kernel_size` - padding: implicit zero padding to be added on both sides - dilation: a parameter that controls the stride of elements in the window - return_indices: if True, will return the max indices along with the outputs. 
- Useful when Unpooling later - ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape - - Shape: - - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` - - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` where - :math:`D_{out} = floor((D_{in} + 2 * padding[0] - dilation[0] * (kernel\_size[0] - 1) - 1) / stride[0] + 1)` - :math:`H_{out} = floor((H_{in} + 2 * padding[1] - dilation[1] * (kernel\_size[1] - 1) - 1) / stride[1] + 1)` - :math:`W_{out} = floor((W_{in} + 2 * padding[2] - dilation[2] * (kernel\_size[2] - 1) - 1) / stride[2] + 1)` - - Examples:: - - >>> # pool of square window of size=3, stride=2 - >>> m = nn.MaxPool3d(3, stride=2) - >>> # pool of non-square window - >>> m = nn.MaxPool3d((3, 2, 2), stride=(2, 1, 2)) - >>> input = autograd.Variable(torch.randn(20, 16, 50,44, 31)) - >>> output = m(input) - - .. _link: - https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md - """ - - def __init__(self, kernel_size, stride=None, padding=0, dilation=1, - return_indices=False, ceil_mode=False): - super(MaxPool3d, self).__init__() - self.kernel_size = kernel_size - self.stride = stride or kernel_size - self.padding = padding - self.dilation = dilation - self.return_indices = return_indices - self.ceil_mode = ceil_mode - - def forward(self, input): - return F.max_pool3d(input, self.kernel_size, self.stride, - self.padding, self.dilation, self.ceil_mode, - self.return_indices)
    - - -
    [docs]class AvgPool3d(Module): - r"""Applies a 3D average pooling over an input signal composed of several input - planes. - - In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`, - output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)` - can be precisely described as: - - .. math:: - - \begin{array}{ll} - out(N_i, C_j, d, h, w) = 1 / (kD * kH * kW) * \sum_{{k}=0}^{kD-1} \sum_{{m}=0}^{kH-1} \sum_{{n}=0}^{kW-1} - input(N_i, C_j, stride[0] * d + k, stride[1] * h + m, stride[2] * w + n) - \end{array} - - The parameters :attr:`kernel_size`, :attr:`stride` can either be: - - - a single ``int`` -- in which case the same value is used for the height and width dimension - - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension, - the second `int` for the width dimension and the third `int` for the width dimension - - Args: - kernel_size: the size of the window - stride: the stride of the window. Default value is :attr:`kernel_size` - - Shape: - - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` - - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` where - :math:`D_{out} = floor((D_{in} - kernel\_size[0]) / stride[0] + 1)` - :math:`H_{out} = floor((H_{in} - kernel\_size[1]) / stride[1] + 1)` - :math:`W_{out} = floor((W_{in} - kernel\_size[2]) / stride[2] + 1)` - - Examples:: - - >>> # pool of square window of size=3, stride=2 - >>> m = nn.AvgPool3d(3, stride=2) - >>> # pool of non-square window - >>> m = nn.AvgPool3d((3, 2, 2), stride=(2, 1, 2)) - >>> input = autograd.Variable(torch.randn(20, 16, 50,44, 31)) - >>> output = m(input) - """ - - def __init__(self, kernel_size, stride=None): - super(AvgPool3d, self).__init__() - self.kernel_size = kernel_size - self.stride = stride - - def forward(self, input): - return F.avg_pool3d(input, self.kernel_size, self.stride)
    - - -
    [docs]class FractionalMaxPool2d(Module): - """Applies a 2D fractional max pooling over an input signal composed of several input planes. - - Fractiona MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham - - The max-pooling operation is applied in kHxkW regions by a stochastic - step size determined by the target output size. - The number of output features is equal to the number of input planes. - - Args: - kernel_size: the size of the window to take a max over. - Can be a single number k (for a square kernel of k x k) or a tuple (kh x kw) - output_size: the target output size of the image of the form oH x oW. - Can be a tuple (oH, oW) or a single number oH for a square image oH x oH - output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given. - This has to be a number or tuple in the range (0, 1) - return_indices: if True, will return the indices along with the outputs. - Useful to pass to nn.MaxUnpool2d . Default: False - - Examples: - >>> # pool of square window of size=3, and target output size 13x12 - >>> m = nn.FractionalMaxPool2d(3, output_size=(13, 12)) - >>> # pool of square window and target output size being half of input image size - >>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5)) - >>> input = autograd.Variable(torch.randn(20, 16, 50, 32)) - >>> output = m(input) - - .. 
_Fractional MaxPooling: - http://arxiv.org/abs/1412.6071 - """ - - def __init__(self, kernel_size, output_size=None, output_ratio=None, - return_indices=False, _random_samples=None): - super(FractionalMaxPool2d, self).__init__() - self.kh, self.kw = _pair(kernel_size) - self.return_indices = return_indices - self.register_buffer('_random_samples', _random_samples) - if output_size is not None: - self.outh, self.outw = _pair(output_size) - self.rh, self.rw = None, None - assert output_ratio is None - elif output_ratio is not None: - self.outh, self.outw = None, None - self.rh, self.rw = _pair(output_ratio) - assert output_size is None - assert 0 < self.rh < 1 - assert 0 < self.rw < 1 - else: - raise ValueError("FractionalMaxPool2d requires specifying either " - "an output size, or a pooling ratio") - - def forward(self, input): - kwargs = {} - if self.outh is not None: - kwargs['output_size'] = self.outh, self.outw - else: - kwargs['output_ratio'] = self.rh, self.rw - func = self._backend.FractionalMaxPool2d(self.kw, self.kh, - return_indices=self.return_indices, - _random_samples=self._random_samples, **kwargs) - return func(input)
    - - -
    [docs]class LPPool2d(Module): - r"""Applies a 2D power-average pooling over an input signal composed of several input - planes. - - On each window, the function computed is: :math:`f(X) = pow(sum(pow(X, p)), 1/p)` - - - At p = infinity, one gets Max Pooling - - At p = 1, one gets Average Pooling - - The parameters :attr:`kernel_size`, :attr:`stride` can either be: - - - a single ``int`` -- in which case the same value is used for the height and width dimension - - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension, - and the second `int` for the width dimension - - Args: - kernel_size: the size of the window - stride: the stride of the window. Default value is :attr:`kernel_size` - ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape - - Shape: - - Input: :math:`(N, C, H_{in}, W_{in})` - - Output: :math:`(N, C, H_{out}, W_{out})` where - :math:`H_{out} = floor((H_{in} + 2 * padding[0] - dilation[0] * (kernel\_size[0] - 1) - 1) / stride[0] + 1)` - :math:`W_{out} = floor((W_{in} + 2 * padding[1] - dilation[1] * (kernel\_size[1] - 1) - 1) / stride[1] + 1)` - - Examples:: - - >>> # power-2 pool of square window of size=3, stride=2 - >>> m = nn.LPPool2d(2, 3, stride=2) - >>> # pool of non-square window of power 1.2 - >>> m = nn.LPPool2d(1.2, (3, 2), stride=(2, 1)) - >>> input = autograd.Variable(torch.randn(20, 16, 50, 32)) - >>> output = m(input) - - """ - - def __init__(self, norm_type, kernel_size, stride=None, ceil_mode=False): - super(LPPool2d, self).__init__() - self.norm_type = norm_type - self.kernel_size = kernel_size - self.stride = stride - self.ceil_mode = ceil_mode - - def forward(self, input): - return F.lp_pool2d(input, self.norm_type, self.kernel_size, - self.stride, self.ceil_mode)
    - - -# TODO: AdaptiveMaxPool2d -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/modules/rnn.html b/docs/_modules/torch/nn/modules/rnn.html deleted file mode 100644 index 761791e7a54c..000000000000 --- a/docs/_modules/torch/nn/modules/rnn.html +++ /dev/null @@ -1,1126 +0,0 @@ - - - - - - - - - - - torch.nn.modules.rnn — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.modules.rnn

    -import math
    -import torch
    -
    -from .module import Module
    -from ..parameter import Parameter
    -from ..utils.rnn import PackedSequence
    -
    -
    -class RNNBase(Module):
    -
    -    def __init__(self, mode, input_size, hidden_size,
    -                 num_layers=1, bias=True, batch_first=False,
    -                 dropout=0, bidirectional=False):
    -        super(RNNBase, self).__init__()
    -        self.mode = mode
    -        self.input_size = input_size
    -        self.hidden_size = hidden_size
    -        self.num_layers = num_layers
    -        self.bias = bias
    -        self.batch_first = batch_first
    -        self.dropout = dropout
    -        self.dropout_state = {}
    -        self.bidirectional = bidirectional
    -        num_directions = 2 if bidirectional else 1
    -
    -        self._all_weights = []
    -        for layer in range(num_layers):
    -            for direction in range(num_directions):
    -                layer_input_size = input_size if layer == 0 else hidden_size * num_directions
    -                if mode == 'LSTM':
    -                    gate_size = 4 * hidden_size
    -                elif mode == 'GRU':
    -                    gate_size = 3 * hidden_size
    -                else:
    -                    gate_size = hidden_size
    -
    -                w_ih = Parameter(torch.Tensor(gate_size, layer_input_size))
    -                w_hh = Parameter(torch.Tensor(gate_size, hidden_size))
    -                b_ih = Parameter(torch.Tensor(gate_size))
    -                b_hh = Parameter(torch.Tensor(gate_size))
    -
    -                suffix = '_reverse' if direction == 1 else ''
    -                weights = ['weight_ih_l{}{}', 'weight_hh_l{}{}', 'bias_ih_l{}{}', 'bias_hh_l{}{}']
    -                weights = [x.format(layer, suffix) for x in weights]
    -                setattr(self, weights[0], w_ih)
    -                setattr(self, weights[1], w_hh)
    -                if bias:
    -                    setattr(self, weights[2], b_ih)
    -                    setattr(self, weights[3], b_hh)
    -                    self._all_weights += [weights]
    -                else:
    -                    self._all_weights += [weights[:2]]
    -
    -        self.reset_parameters()
    -
    -    def reset_parameters(self):
    -        stdv = 1.0 / math.sqrt(self.hidden_size)
    -        for weight in self.parameters():
    -            weight.data.uniform_(-stdv, stdv)
    -
    -    def forward(self, input, hx=None):
    -        is_packed = isinstance(input, PackedSequence)
    -        if is_packed:
    -            input, batch_sizes = input
    -            max_batch_size = batch_sizes[0]
    -        else:
    -            batch_sizes = None
    -            max_batch_size = input.size(0) if self.batch_first else input.size(1)
    -
    -        if hx is None:
    -            num_directions = 2 if self.bidirectional else 1
    -            hx = torch.autograd.Variable(input.data.new(self.num_layers *
    -                                                        num_directions,
    -                                                        max_batch_size,
    -                                                        self.hidden_size).zero_())
    -            if self.mode == 'LSTM':
    -                hx = (hx, hx)
    -
    -        func = self._backend.RNN(
    -            self.mode,
    -            self.input_size,
    -            self.hidden_size,
    -            num_layers=self.num_layers,
    -            batch_first=self.batch_first,
    -            dropout=self.dropout,
    -            train=self.training,
    -            bidirectional=self.bidirectional,
    -            batch_sizes=batch_sizes,
    -            dropout_state=self.dropout_state
    -        )
    -        output, hidden = func(input, self.all_weights, hx)
    -        if is_packed:
    -            output = PackedSequence(output, batch_sizes)
    -        return output, hidden
    -
    -    def __repr__(self):
    -        s = '{name}({input_size}, {hidden_size}'
    -        if self.num_layers != 1:
    -            s += ', num_layers={num_layers}'
    -        if self.bias is not True:
    -            s += ', bias={bias}'
    -        if self.batch_first is not False:
    -            s += ', batch_first={batch_first}'
    -        if self.dropout != 0:
    -            s += ', dropout={dropout}'
    -        if self.bidirectional is not False:
    -            s += ', bidirectional={bidirectional}'
    -        s += ')'
    -        return s.format(name=self.__class__.__name__, **self.__dict__)
    -
    -    def __setstate__(self, d):
    -        self.__dict__.update(d)
    -        if 'all_weights' in d:
    -            self._all_weights = d['all_weights']
    -        if isinstance(self._all_weights[0][0], str):
    -            return
    -        num_layers = self.num_layers
    -        num_directions = 2 if self.bidirectional else 1
    -        self._all_weights = []
    -        for layer in range(num_layers):
    -            for direction in range(num_directions):
    -                suffix = '_reverse' if direction == 1 else ''
    -                weights = ['weight_ih_l{}{}', 'weight_hh_l{}{}', 'bias_ih_l{}{}', 'bias_hh_l{}{}']
    -                weights = [x.format(layer, suffix) for x in weights]
    -                if self.bias:
    -                    self._all_weights += [weights]
    -                else:
    -                    self._all_weights += [weights[:2]]
    -
    -    @property
    -    def all_weights(self):
    -        return [[getattr(self, weight) for weight in weights] for weights in self._all_weights]
    -
    -
    -
    [docs]class RNN(RNNBase): - r"""Applies a multi-layer Elman RNN with tanh or ReLU non-linearity to an input sequence. - - - For each element in the input sequence, each layer computes the following - function: - - .. math:: - - h_t = \tanh(w_{ih} * x_t + b_{ih} + w_{hh} * h_{(t-1)} + b_{hh}) - - where :math:`h_t` is the hidden state at time `t`, and :math:`x_t` is the hidden - state of the previous layer at time `t` or :math:`input_t` for the first layer. - If nonlinearity='relu', then `ReLU` is used instead of `tanh`. - - Args: - input_size: The number of expected features in the input x - hidden_size: The number of features in the hidden state h - num_layers: Number of recurrent layers. - nonlinearity: The non-linearity to use ['tanh'|'relu']. Default: 'tanh' - bias: If False, then the layer does not use bias weights b_ih and b_hh. Default: True - batch_first: If True, then the input and output tensors are provided as (batch, seq, feature) - dropout: If non-zero, introduces a dropout layer on the outputs of each RNN layer except the last layer - bidirectional: If True, becomes a bidirectional RNN. Default: False - - Inputs: input, h_0 - - **input** (seq_len, batch, input_size): tensor containing the features of the input sequence. - The input can also be a packed variable length sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence` - for details. - - **h_0** (num_layers * num_directions, batch, hidden_size): tensor containing the initial hidden state - for each element in the batch. - - Outputs: output, h_n - - **output** (seq_len, batch, hidden_size * num_directions): tensor containing the output features (h_k) - from the last layer of the RNN, for each k. If a :class:`torch.nn.utils.rnn.PackedSequence` has been given - as the input, the output will also be a packed sequence. - - **h_n** (num_layers * num_directions, batch, hidden_size): tensor containing the hidden state for k=seq_len. 
- - Attributes: - weight_ih_l[k]: the learnable input-hidden weights of the k-th layer, - of shape `(input_size x hidden_size)` - weight_hh_l[k]: the learnable hidden-hidden weights of the k-th layer, - of shape `(hidden_size x hidden_size)` - bias_ih_l[k]: the learnable input-hidden bias of the k-th layer, of shape `(hidden_size)` - bias_hh_l[k]: the learnable hidden-hidden bias of the k-th layer, of shape `(hidden_size)` - - Examples:: - - >>> rnn = nn.RNN(10, 20, 2) - >>> input = Variable(torch.randn(5, 3, 10)) - >>> h0 = Variable(torch.randn(2, 3, 20)) - >>> output, hn = rnn(input, h0) - """ - - def __init__(self, *args, **kwargs): - if 'nonlinearity' in kwargs: - if kwargs['nonlinearity'] == 'tanh': - mode = 'RNN_TANH' - elif kwargs['nonlinearity'] == 'relu': - mode = 'RNN_RELU' - else: - raise ValueError("Unknown nonlinearity '{}'".format( - kwargs['nonlinearity'])) - del kwargs['nonlinearity'] - else: - mode = 'RNN_TANH' - - super(RNN, self).__init__(mode, *args, **kwargs)
    - - -
    [docs]class LSTM(RNNBase): - r"""Applies a multi-layer long short-term memory (LSTM) RNN to an input sequence. - - - For each element in the input sequence, each layer computes the following - function: - - .. math:: - - \begin{array}{ll} - i_t = sigmoid(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\ - f_t = sigmoid(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\ - g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hc} h_{(t-1)} + b_{hg}) \\ - o_t = sigmoid(W_{io} x_t + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\ - c_t = f_t * c_{(t-1)} + i_t * g_t \\ - h_t = o_t * \tanh(c_t) - \end{array} - - where :math:`h_t` is the hidden state at time `t`, :math:`c_t` is the cell state at time `t`, - :math:`x_t` is the hidden state of the previous layer at time `t` or :math:`input_t` for the first layer, - and :math:`i_t`, :math:`f_t`, :math:`g_t`, :math:`o_t` are the input, forget, - cell, and out gates, respectively. - - Args: - input_size: The number of expected features in the input x - hidden_size: The number of features in the hidden state h - num_layers: Number of recurrent layers. - bias: If False, then the layer does not use bias weights b_ih and b_hh. Default: True - batch_first: If True, then the input and output tensors are provided as (batch, seq, feature) - dropout: If non-zero, introduces a dropout layer on the outputs of each RNN layer except the last layer - bidirectional: If True, becomes a bidirectional RNN. Default: False - - Inputs: input, (h_0, c_0) - - **input** (seq_len, batch, input_size): tensor containing the features of the input sequence. - The input can also be a packed variable length sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence` - for details. - - **h_0** (num_layers \* num_directions, batch, hidden_size): tensor containing - the initial hidden state for each element in the batch. - - **c_0** (num_layers \* num_directions, batch, hidden_size): tensor containing - the initial cell state for each element in the batch. 
- - - Outputs: output, (h_n, c_n) - - **output** (seq_len, batch, hidden_size * num_directions): tensor containing - the output features `(h_t)` from the last layer of the RNN, for each t. If a - :class:`torch.nn.utils.rnn.PackedSequence` has been given as the input, the output will also be a - packed sequence. - - **h_n** (num_layers * num_directions, batch, hidden_size): tensor containing the hidden state for t=seq_len - - **c_n** (num_layers * num_directions, batch, hidden_size): tensor containing the cell state for t=seq_len - - Attributes: - weight_ih_l[k] : the learnable input-hidden weights of the k-th layer `(W_ii|W_if|W_ig|W_io)`, of shape - `(input_size x 4*hidden_size)` - weight_hh_l[k] : the learnable hidden-hidden weights of the k-th layer `(W_hi|W_hf|W_hg|W_ho)`, of shape - `(hidden_size x 4*hidden_size)` - bias_ih_l[k] : the learnable input-hidden bias of the k-th layer `(b_ii|b_if|b_ig|b_io)`, of shape - `(4*hidden_size)` - bias_hh_l[k] : the learnable hidden-hidden bias of the k-th layer `(W_hi|W_hf|W_hg|b_ho)`, of shape - `(4*hidden_size)` - - Examples:: - - >>> rnn = nn.LSTM(10, 20, 2) - >>> input = Variable(torch.randn(5, 3, 10)) - >>> h0 = Variable(torch.randn(2, 3, 20)) - >>> c0 = Variable(torch.randn(2, 3, 20)) - >>> output, hn = rnn(input, (h0, c0)) - """ - - def __init__(self, *args, **kwargs): - super(LSTM, self).__init__('LSTM', *args, **kwargs)
    - - -
    [docs]class GRU(RNNBase): - r"""Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence. - - - For each element in the input sequence, each layer computes the following - function: - - .. math:: - - \begin{array}{ll} - r_t = sigmoid(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\ - i_t = sigmoid(W_{ii} x_t + b_{ii} + W_hi h_{(t-1)} + b_{hi}) \\ - n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\ - h_t = (1 - i_t) * n_t + i_t * h_{(t-1)} \\ - \end{array} - - where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the hidden - state of the previous layer at time `t` or :math:`input_t` for the first layer, - and :math:`r_t`, :math:`i_t`, :math:`n_t` are the reset, input, and new gates, respectively. - - Args: - input_size: The number of expected features in the input x - hidden_size: The number of features in the hidden state h - num_layers: Number of recurrent layers. - bias: If False, then the layer does not use bias weights b_ih and b_hh. Default: True - batch_first: If True, then the input and output tensors are provided as (batch, seq, feature) - dropout: If non-zero, introduces a dropout layer on the outputs of each RNN layer except the last layer - bidirectional: If True, becomes a bidirectional RNN. Default: False - - Inputs: input, h_0 - - **input** (seq_len, batch, input_size): tensor containing the features of the input sequence. - The input can also be a packed variable length sequence. See :func:`torch.nn.utils.rnn.pack_padded_sequence` - for details. - - **h_0** (num_layers * num_directions, batch, hidden_size): tensor containing the initial - hidden state for each element in the batch. - - Outputs: output, h_n - - **output** (seq_len, batch, hidden_size * num_directions): tensor containing the output features h_t from - the last layer of the RNN, for each t. If a :class:`torch.nn.utils.rnn.PackedSequence` has been given as the - input, the output will also be a packed sequence. 
- - **h_n** (num_layers * num_directions, batch, hidden_size): tensor containing the hidden state for t=seq_len - - Attributes: - weight_ih_l[k] : the learnable input-hidden weights of the k-th layer (W_ir|W_ii|W_in), of shape - `(input_size x 3*hidden_size)` - weight_hh_l[k] : the learnable hidden-hidden weights of the k-th layer (W_hr|W_hi|W_hn), of shape - `(hidden_size x 3*hidden_size)` - bias_ih_l[k] : the learnable input-hidden bias of the k-th layer (b_ir|b_ii|b_in), of shape - `(3*hidden_size)` - bias_hh_l[k] : the learnable hidden-hidden bias of the k-th layer (W_hr|W_hi|W_hn), of shape - `(3*hidden_size)` - Examples:: - - >>> rnn = nn.GRU(10, 20, 2) - >>> input = Variable(torch.randn(5, 3, 10)) - >>> h0 = Variable(torch.randn(2, 3, 20)) - >>> output, hn = rnn(input, h0) - """ - - def __init__(self, *args, **kwargs): - super(GRU, self).__init__('GRU', *args, **kwargs)
    - - -class RNNCellBase(Module): - - def __repr__(self): - s = '{name}({input_size}, {hidden_size}' - if 'bias' in self.__dict__ and self.bias is not True: - s += ', bias={bias}' - if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh": - s += ', nonlinearity={nonlinearity}' - s += ')' - return s.format(name=self.__class__.__name__, **self.__dict__) - - -
    [docs]class RNNCell(RNNCellBase): - r"""An Elman RNN cell with tanh or ReLU non-linearity. - - .. math:: - - h' = \tanh(w_{ih} * x + b_{ih} + w_{hh} * h + b_{hh}) - - If nonlinearity='relu', then ReLU is used in place of tanh. - - Args: - input_size: The number of expected features in the input x - hidden_size: The number of features in the hidden state h - bias: If False, then the layer does not use bias weights b_ih and b_hh. Default: True - nonlinearity: The non-linearity to use ['tanh'|'relu']. Default: 'tanh' - - Inputs: input, hidden - - **input** (batch, input_size): tensor containing input features - - **hidden** (batch, hidden_size): tensor containing the initial hidden state for each element in the batch. - - Outputs: h' - - **h'** (batch, hidden_size): tensor containing the next hidden state for each element in the batch - - Attributes: - weight_ih: the learnable input-hidden weights, of shape `(input_size x hidden_size)` - weight_hh: the learnable hidden-hidden weights, of shape `(hidden_size x hidden_size)` - bias_ih: the learnable input-hidden bias, of shape `(hidden_size)` - bias_hh: the learnable hidden-hidden bias, of shape `(hidden_size)` - - Examples:: - - >>> rnn = nn.RNNCell(10, 20) - >>> input = Variable(torch.randn(6, 3, 10)) - >>> hx = Variable(torch.randn(3, 20)) - >>> output = [] - >>> for i in range(6): - ... hx = rnn(input[i], hx) - ... 
output.append(hx) - """ - - def __init__(self, input_size, hidden_size, bias=True, nonlinearity="tanh"): - super(RNNCell, self).__init__() - self.input_size = input_size - self.hidden_size = hidden_size - self.bias = bias - self.nonlinearity = nonlinearity - self.weight_ih = Parameter(torch.Tensor(hidden_size, input_size)) - self.weight_hh = Parameter(torch.Tensor(hidden_size, hidden_size)) - if bias: - self.bias_ih = Parameter(torch.Tensor(hidden_size)) - self.bias_hh = Parameter(torch.Tensor(hidden_size)) - else: - self.register_parameter('bias_ih', None) - self.register_parameter('bias_hh', None) - self.reset_parameters() - - def reset_parameters(self): - stdv = 1.0 / math.sqrt(self.hidden_size) - for weight in self.parameters(): - weight.data.uniform_(-stdv, stdv) - - def forward(self, input, hx): - if self.nonlinearity == "tanh": - func = self._backend.RNNTanhCell - elif self.nonlinearity == "relu": - func = self._backend.RNNReLUCell - else: - raise RuntimeError( - "Unknown nonlinearity: {}".format(self.nonlinearity)) - - return func( - input, hx, - self.weight_ih, self.weight_hh, - self.bias_ih, self.bias_hh, - )
    - - -
    [docs]class LSTMCell(RNNCellBase): - r"""A long short-term memory (LSTM) cell. - - .. math:: - - \begin{array}{ll} - i = sigmoid(W_{ii} x + b_{ii} + W_{hi} h + b_{hi}) \\ - f = sigmoid(W_{if} x + b_{if} + W_{hf} h + b_{hf}) \\ - g = \tanh(W_{ig} x + b_{ig} + W_{hc} h + b_{hg}) \\ - o = sigmoid(W_{io} x + b_{io} + W_{ho} h + b_{ho}) \\ - c' = f * c + i * g \\ - h' = o * \tanh(c_t) \\ - \end{array} - - Args: - input_size: The number of expected features in the input x - hidden_size: The number of features in the hidden state h - bias: If `False`, then the layer does not use bias weights `b_ih` and `b_hh`. Default: True - - Inputs: input, (h_0, c_0) - - **input** (batch, input_size): tensor containing input features - - **h_0** (batch, hidden_size): tensor containing the initial hidden state for each element in the batch. - - **c_0** (batch. hidden_size): tensor containing the initial cell state for each element in the batch. - - Outputs: h_1, c_1 - - **h_1** (batch, hidden_size): tensor containing the next hidden state for each element in the batch - - **c_1** (batch, hidden_size): tensor containing the next cell state for each element in the batch - - Attributes: - weight_ih: the learnable input-hidden weights, of shape `(input_size x hidden_size)` - weight_hh: the learnable hidden-hidden weights, of shape `(hidden_size x hidden_size)` - bias_ih: the learnable input-hidden bias, of shape `(hidden_size)` - bias_hh: the learnable hidden-hidden bias, of shape `(hidden_size)` - - Examples:: - - >>> rnn = nn.LSTMCell(10, 20) - >>> input = Variable(torch.randn(6, 3, 10)) - >>> hx = Variable(torch.randn(3, 20)) - >>> cx = Variable(torch.randn(3, 20)) - >>> output = [] - >>> for i in range(6): - ... hx, cx = rnn(input[i], (hx, cx)) - ... 
output.append(hx) - """ - - def __init__(self, input_size, hidden_size, bias=True): - super(LSTMCell, self).__init__() - self.input_size = input_size - self.hidden_size = hidden_size - self.bias = bias - self.weight_ih = Parameter(torch.Tensor(4 * hidden_size, input_size)) - self.weight_hh = Parameter(torch.Tensor(4 * hidden_size, hidden_size)) - if bias: - self.bias_ih = Parameter(torch.Tensor(4 * hidden_size)) - self.bias_hh = Parameter(torch.Tensor(4 * hidden_size)) - else: - self.register_parameter('bias_ih', None) - self.register_parameter('bias_hh', None) - self.reset_parameters() - - def reset_parameters(self): - stdv = 1.0 / math.sqrt(self.hidden_size) - for weight in self.parameters(): - weight.data.uniform_(-stdv, stdv) - - def forward(self, input, hx): - return self._backend.LSTMCell( - input, hx, - self.weight_ih, self.weight_hh, - self.bias_ih, self.bias_hh, - )
    - - -
    [docs]class GRUCell(RNNCellBase): - r"""A gated recurrent unit (GRU) cell - - .. math:: - - \begin{array}{ll} - r = sigmoid(W_{ir} x + b_{ir} + W_{hr} h + b_{hr}) \\ - i = sigmoid(W_{ii} x + b_{ii} + W_{hi} h + b_{hi}) \\ - n = \tanh(W_{in} x + b_{in} + r * (W_{hn} h + b_{hn})) \\ - h' = (1 - i) * n + i * h - \end{array} - - Args: - input_size: The number of expected features in the input x - hidden_size: The number of features in the hidden state h - bias: If `False`, then the layer does not use bias weights `b_ih` and `b_hh`. Default: `True` - - Inputs: input, hidden - - **input** (batch, input_size): tensor containing input features - - **hidden** (batch, hidden_size): tensor containing the initial hidden state for each element in the batch. - - Outputs: h' - - **h'**: (batch, hidden_size): tensor containing the next hidden state for each element in the batch - - Attributes: - weight_ih: the learnable input-hidden weights, of shape `(input_size x hidden_size)` - weight_hh: the learnable hidden-hidden weights, of shape `(hidden_size x hidden_size)` - bias_ih: the learnable input-hidden bias, of shape `(hidden_size)` - bias_hh: the learnable hidden-hidden bias, of shape `(hidden_size)` - - Examples:: - - >>> rnn = nn.GRUCell(10, 20) - >>> input = Variable(torch.randn(6, 3, 10)) - >>> hx = Variable(torch.randn(3, 20)) - >>> output = [] - >>> for i in range(6): - ... hx = rnn(input[i], hx) - ... 
output.append(hx) - """ - - def __init__(self, input_size, hidden_size, bias=True): - super(GRUCell, self).__init__() - self.input_size = input_size - self.hidden_size = hidden_size - self.bias = bias - self.weight_ih = Parameter(torch.Tensor(3 * hidden_size, input_size)) - self.weight_hh = Parameter(torch.Tensor(3 * hidden_size, hidden_size)) - if bias: - self.bias_ih = Parameter(torch.Tensor(3 * hidden_size)) - self.bias_hh = Parameter(torch.Tensor(3 * hidden_size)) - else: - self.register_parameter('bias_ih', None) - self.register_parameter('bias_hh', None) - self.reset_parameters() - - def reset_parameters(self): - stdv = 1.0 / math.sqrt(self.hidden_size) - for weight in self.parameters(): - weight.data.uniform_(-stdv, stdv) - - def forward(self, input, hx): - return self._backend.GRUCell( - input, hx, - self.weight_ih, self.weight_hh, - self.bias_ih, self.bias_hh, - )
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/modules/sparse.html b/docs/_modules/torch/nn/modules/sparse.html deleted file mode 100644 index b783c5d0202e..000000000000 --- a/docs/_modules/torch/nn/modules/sparse.html +++ /dev/null @@ -1,669 +0,0 @@ - - - - - - - - - - - torch.nn.modules.sparse — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.modules.sparse

    -import torch
    -from torch.nn.parameter import Parameter
    -
    -from .module import Module
    -
    -
    -
    [docs]class Embedding(Module): - r"""A simple lookup table that stores embeddings of a fixed dictionary and size. - - This module is often used to store word embeddings and retrieve them using indices. - The input to the module is a list of indices, and the output is the corresponding - word embeddings. - - Args: - num_embeddings (int): size of the dictionary of embeddings - embedding_dim (int): the size of each embedding vector - padding_idx (int, optional): If given, pads the output with zeros whenever it encounters the index. - max_norm (float, optional): If given, will renormalize the embeddings to always have a norm lesser than this - norm_type (float, optional): The p of the p-norm to compute for the max_norm option - scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the frequency of - the words in the dictionary. - - Attributes: - weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim) - - Shape: - - Input: LongTensor `(N, W)`, N = mini-batch, W = number of indices to extract per mini-batch - - Output: `(N, W, embedding_dim)` - - Examples:: - - >>> # an Embedding module containing 10 tensors of size 3 - >>> embedding = nn.Embedding(10, 3) - >>> # a batch of 2 samples of 4 indices each - >>> input = Variable(torch.LongTensor([[1,2,4,5],[4,3,2,9]])) - >>> embedding(input) - - Variable containing: - (0 ,.,.) = - -1.0822 1.2522 0.2434 - 0.8393 -0.6062 -0.3348 - 0.6597 0.0350 0.0837 - 0.5521 0.9447 0.0498 - - (1 ,.,.) = - 0.6597 0.0350 0.0837 - -0.1527 0.0877 0.4260 - 0.8393 -0.6062 -0.3348 - -0.8738 -0.9054 0.4281 - [torch.FloatTensor of size 2x4x3] - - >>> # example with padding_idx - >>> embedding = nn.Embedding(10, 3, padding_idx=0) - >>> input = Variable(torch.LongTensor([[0,2,0,5]])) - >>> embedding(input) - - Variable containing: - (0 ,.,.) 
= - 0.0000 0.0000 0.0000 - 0.3452 0.4937 -0.9361 - 0.0000 0.0000 0.0000 - 0.0706 -2.1962 -0.6276 - [torch.FloatTensor of size 1x4x3] - - """ - - def __init__(self, num_embeddings, embedding_dim, padding_idx=None, - max_norm=None, norm_type=2, scale_grad_by_freq=False, - sparse=False): - super(Embedding, self).__init__() - self.num_embeddings = num_embeddings - self.embedding_dim = embedding_dim - self.padding_idx = padding_idx - self.max_norm = max_norm - self.norm_type = norm_type - self.scale_grad_by_freq = scale_grad_by_freq - self.weight = Parameter(torch.Tensor(num_embeddings, embedding_dim)) - self.sparse = sparse - - self.reset_parameters() - - def reset_parameters(self): - self.weight.data.normal_(0, 1) - if self.padding_idx is not None: - self.weight.data[self.padding_idx].fill_(0) - - def forward(self, input): - padding_idx = self.padding_idx - if padding_idx is None: - padding_idx = -1 - return self._backend.Embedding( - padding_idx, self.max_norm, self.norm_type, - self.scale_grad_by_freq, self.sparse - )(input, self.weight) - - def __repr__(self): - s = '{name}({num_embeddings}, {embedding_dim}' - if self.padding_idx is not None: - s += ', padding_idx={padding_idx}' - if self.max_norm is not None: - s += ', max_norm={max_norm}' - if self.norm_type != 2: - s += ', norm_type={norm_type}' - if self.scale_grad_by_freq is not False: - s += ', scale_grad_by_freq={scale_grad_by_freq}' - if self.sparse is not False: - s += ', sparse=True' - s += ')' - return s.format(name=self.__class__.__name__, **self.__dict__)
    - - -# TODO: SparseLinear -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/modules/upsampling.html b/docs/_modules/torch/nn/modules/upsampling.html deleted file mode 100644 index f794ab516b36..000000000000 --- a/docs/_modules/torch/nn/modules/upsampling.html +++ /dev/null @@ -1,671 +0,0 @@ - - - - - - - - - - - torch.nn.modules.upsampling — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.modules.upsampling

    -from numbers import Integral
    -
    -from .module import Module
    -from .. import functional as F
    -from .utils import _pair
    -
    -
    -class _UpsamplingBase(Module):
    -
    -    def __init__(self, size=None, scale_factor=None):
    -        super(_UpsamplingBase, self).__init__()
    -        if size is None and scale_factor is None:
    -            raise ValueError('either size or scale_factor should be defined')
    -        if scale_factor is not None and not isinstance(scale_factor, Integral):
    -            raise ValueError('scale_factor must be of integer type')
    -        self.size = _pair(size)
    -        self.scale_factor = scale_factor
    -
    -    def __repr__(self):
    -        if self.scale_factor is not None:
    -            info = 'scale_factor=' + str(self.scale_factor)
    -        else:
    -            info = 'size=' + str(self.size)
    -        return self.__class__.__name__ + '(' + info + ')'
    -
    -
    -
    [docs]class UpsamplingNearest2d(_UpsamplingBase): - """ - Applies a 2D nearest neighbor upsampling to an input signal composed of several input - channels. - - To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor` - as it's constructor argument. - - When `size` is given, it is the output size of the image (h, w). - - Args: - size (tuple, optional): a tuple of ints (H_out, W_out) output sizes - scale_factor (int, optional): the multiplier for the image height / width - - Shape: - - Input: :math:`(N, C, H_{in}, W_{in})` - - Output: :math:`(N, C, H_{out}, W_{out})` where - :math:`H_{out} = floor(H_{in} * scale\_factor)` - :math:`W_{out} = floor(W_{in} * scale\_factor)` - - Examples:: - - >>> inp - Variable containing: - (0 ,0 ,.,.) = - 1 2 - 3 4 - [torch.FloatTensor of size 1x1x2x2] - - >>> m = nn.UpsamplingNearest2d(scale_factor=2) - >>> m(inp) - Variable containing: - (0 ,0 ,.,.) = - 1 1 2 2 - 1 1 2 2 - 3 3 4 4 - 3 3 4 4 - [torch.FloatTensor of size 1x1x4x4] - - """ - - def forward(self, input): - return F.upsample_nearest(input, self.size, self.scale_factor)
    - - -
    [docs]class UpsamplingBilinear2d(_UpsamplingBase): - """ - Applies a 2D bilinear upsampling to an input signal composed of several input - channels. - - To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor` - as it's constructor argument. - - When `size` is given, it is the output size of the image (h, w). - - Args: - size (tuple, optional): a tuple of ints (H_out, W_out) output sizes - scale_factor (int, optional): the multiplier for the image height / width - - Shape: - - Input: :math:`(N, C, H_{in}, W_{in})` - - Output: :math:`(N, C, H_{out}, W_{out})` where - :math:`H_{out} = floor(H_{in} * scale\_factor)` - :math:`W_{out} = floor(W_{in} * scale\_factor)` - - Examples:: - - >>> inp - Variable containing: - (0 ,0 ,.,.) = - 1 2 - 3 4 - [torch.FloatTensor of size 1x1x2x2] - - >>> m = nn.UpsamplingBilinear2d(scale_factor=2) - >>> m(inp) - Variable containing: - (0 ,0 ,.,.) = - 1.0000 1.3333 1.6667 2.0000 - 1.6667 2.0000 2.3333 2.6667 - 2.3333 2.6667 3.0000 3.3333 - 3.0000 3.3333 3.6667 4.0000 - [torch.FloatTensor of size 1x1x4x4] - - """ - - def forward(self, input): - return F.upsample_bilinear(input, self.size, self.scale_factor)
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/parallel/data_parallel.html b/docs/_modules/torch/nn/parallel/data_parallel.html deleted file mode 100644 index 86976ac62eef..000000000000 --- a/docs/_modules/torch/nn/parallel/data_parallel.html +++ /dev/null @@ -1,660 +0,0 @@ - - - - - - - - - - - torch.nn.parallel.data_parallel — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.parallel.data_parallel

    -import torch
    -from ..modules import Module
    -from .scatter_gather import scatter_kwargs, gather
    -from .replicate import replicate
    -from .parallel_apply import parallel_apply
    -
    -
    -
    [docs]class DataParallel(Module): - - """Implements data parallelism at the module level. - - This container parallelizes the application of the given module by - splitting the input across the specified devices by chunking in the batch - dimension. In the forward pass, the module is replicated on each device, - and each replica handles a portion of the input. During the backwards - pass, gradients from each replica are summed into the original module. - - The batch size should be larger than the number of GPUs used. It should - also be an integer multiple of the number of GPUs so that each chunk is the - same size (so that each GPU processes the same number of samples). - - See also: :ref:`cuda-nn-dataparallel-instead` - - Arbitrary positional and keyword inputs are allowed to be passed into - DataParallel EXCEPT Tensors. All variables will be scattered on dim - specified (default 0). Primitive types will be broadcasted, but all - other types will be a shallow copy and can be corrupted if written to in - the model's forward pass. 
- - Args: - module: module to be parallelized - device_ids: CUDA devices (default: all devices) - output_device: device location of output (default: device_ids[0]) - - Example:: - - >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2]) - >>> output = net(input_var) - """ - - # TODO: update notes/cuda.rst when this class handles 8+ GPUs well - - def __init__(self, module, device_ids=None, output_device=None, dim=0): - super(DataParallel, self).__init__() - if device_ids is None: - device_ids = list(range(torch.cuda.device_count())) - if output_device is None: - output_device = device_ids[0] - self.dim = dim - self.module = module - self.device_ids = device_ids - self.output_device = output_device - if len(self.device_ids) == 1: - self.module.cuda(device_ids[0]) - - def forward(self, *inputs, **kwargs): - inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) - if len(self.device_ids) == 1: - return self.module(*inputs[0], **kwargs[0]) - replicas = self.replicate(self.module, self.device_ids[:len(inputs)]) - outputs = self.parallel_apply(replicas, inputs, kwargs) - return self.gather(outputs, self.output_device) - - def replicate(self, module, device_ids): - return replicate(module, device_ids) - - def scatter(self, inputs, kwargs, device_ids): - return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) - - def parallel_apply(self, replicas, inputs, kwargs): - return parallel_apply(replicas, inputs, kwargs) - - def gather(self, outputs, output_device): - return gather(outputs, output_device, dim=self.dim)
    - - -def data_parallel(module, inputs, device_ids, output_device=None, dim=0, module_kwargs=None): - """Evaluates module(input) in parallel across the GPUs given in device_ids. - - This is the functional version of the DataParallel module. - - Args: - module: the module to evaluate in parallel - inputs: inputs to the module - device_ids: GPU ids on which to replicate module - output_device: GPU location of the output Use -1 to indicate the CPU. - (default: device_ids[0]) - Returns: - a Variable containing the result of module(input) located on - output_device - """ - if not isinstance(inputs, tuple): - inputs = (inputs,) - - if output_device is None: - output_device = device_ids[0] - - inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim) - if len(device_ids) == 1: - return module(*inputs[0], **module_kwargs[0]) - replicas = replicate(module, device_ids[:len(inputs)]) - outputs = parallel_apply(replicas, inputs, module_kwargs) - return gather(outputs, output_device, dim) -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/parameter.html b/docs/_modules/torch/nn/parameter.html deleted file mode 100644 index f7abfe01ba58..000000000000 --- a/docs/_modules/torch/nn/parameter.html +++ /dev/null @@ -1,585 +0,0 @@ - - - - - - - - - - - torch.nn.parameter — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.parameter

    -from torch.autograd import Variable
    -
    -
    -
    [docs]class Parameter(Variable): - """A kind of Variable that is to be considered a module parameter. - - Parameters are :class:`~torch.autograd.Variable` subclasses, that have a - very special property when used with :class:`Module` s - when they're - assigned as Module attributes they are automatically added to the list of - its parameters, and will appear e.g. in :meth:`~Module.parameters` iterator. - Assigning a Variable doesn't have such effect. This is because one might - want to cache some temporary state, like last hidden state of the RNN, in - the model. If there was no such class as :class:`Parameter`, these - temporaries would get registered too. - - Another difference is that parameters can't be volatile and that they - require gradient by default. - - Arguments: - data (Tensor): parameter tensor. - requires_grad (bool, optional): if the parameter requires gradient. See - :ref:`excluding-subgraphs` for more details. - """ - def __new__(cls, data=None, requires_grad=True): - return super(Parameter, cls).__new__(cls, data, requires_grad=requires_grad) - - def __repr__(self): - return 'Parameter containing:' + self.data.__repr__()
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/utils/clip_grad.html b/docs/_modules/torch/nn/utils/clip_grad.html deleted file mode 100644 index 76982cdf0c31..000000000000 --- a/docs/_modules/torch/nn/utils/clip_grad.html +++ /dev/null @@ -1,589 +0,0 @@ - - - - - - - - - - - torch.nn.utils.clip_grad — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.utils.clip_grad

    -
    -
    [docs]def clip_grad_norm(parameters, max_norm, norm_type=2): - """Clips gradient norm of an iterable of parameters. - - The norm is computed over all gradients together, as if they were - concatenated into a single vector. Gradients are modified in-place. - - Arguments: - parameters (Iterable[Variable]): an iterable of Variables that will have - gradients normalized - max_norm (float or int): max norm of the gradients - norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. - - Returns: - Total norm of the parameters (viewed as a single vector). - """ - parameters = list(filter(lambda p: p.grad is not None, parameters)) - max_norm = float(max_norm) - norm_type = float(norm_type) - if norm_type == float('inf'): - total_norm = max(p.grad.data.abs().max() for p in parameters) - else: - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm ** norm_type - total_norm = total_norm ** (1. / norm_type) - clip_coef = max_norm / (total_norm + 1e-6) - if clip_coef < 1: - for p in parameters: - p.grad.data.mul_(clip_coef) - return total_norm
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/nn/utils/rnn.html b/docs/_modules/torch/nn/utils/rnn.html deleted file mode 100644 index 78c5a3885fa1..000000000000 --- a/docs/_modules/torch/nn/utils/rnn.html +++ /dev/null @@ -1,683 +0,0 @@ - - - - - - - - - - - torch.nn.utils.rnn — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.nn.utils.rnn

    -from collections import namedtuple
    -import torch
    -from torch.autograd import Variable
    -
    -
    -PackedSequence_ = namedtuple('PackedSequence', ['data', 'batch_sizes'])
    -
    -
    -
    [docs]class PackedSequence(PackedSequence_): - """Holds the data and list of batch_sizes of a packed sequence. - - All RNN modules accept packed sequences as inputs. - - Note: - Instances of this class should never be created manually. They are meant - to be instantiated by functions like :func:`pack_padded_sequence`. - - Attributes: - data (Variable): Variable containing packed sequence - batch_sizes (list[int]): list of integers holding information about - the batch size at each sequence step - """ - pass
    - - -
    [docs]def pack_padded_sequence(input, lengths, batch_first=False): - """Packs a Variable containing padded sequences of variable length. - - Input can be of size ``TxBx*`` where T is the length of the longest sequence - (equal to ``lengths[0]``), B is the batch size, and * is any number of - dimensions (including 0). If ``batch_first`` is True ``BxTx*`` inputs are expected. - - The sequences should be sorted by length in a decreasing order, i.e. - ``input[:,0]`` should be the longest sequence, and ``input[:,B-1]`` the - shortest one. - - Note: - This function accept any input that has at least two dimensions. You - can apply it to pack the labels, and use the output of the RNN with - them to compute the loss directly. A Variable can be retrieved from - a :class:`PackedSequence` object by accessing its ``.data`` attribute. - - Arguments: - input (Variable): padded batch of variable length sequences. - lengths (list[int]): list of sequences lengths of each batch element. - batch_first (bool, optional): if True, the input is expected in BxTx* - format. 
- - Returns: - a :class:`PackedSequence` object - """ - if lengths[-1] <= 0: - raise ValueError("length of all samples has to be greater than 0, " - "but found an element in 'lengths' that is <=0") - if batch_first: - input = input.transpose(0, 1) - - steps = [] - batch_sizes = [] - lengths_iter = reversed(lengths) - current_length = next(lengths_iter) - batch_size = input.size(1) - if len(lengths) != batch_size: - raise ValueError("lengths array has incorrect size") - - for step, step_value in enumerate(input, 1): - steps.append(step_value[:batch_size]) - batch_sizes.append(batch_size) - - while step == current_length: - try: - new_length = next(lengths_iter) - except StopIteration: - current_length = None - break - - if current_length > new_length: # remember that new_length is the preceding length in the array - raise ValueError("lengths array has to be sorted in decreasing order") - batch_size -= 1 - current_length = new_length - if current_length is None: - break - return PackedSequence(torch.cat(steps), batch_sizes)
    - - -
    [docs]def pad_packed_sequence(sequence, batch_first=False): - """Pads a packed batch of variable length sequences. - - It is an inverse operation to :func:`pack_padded_sequence`. - - The returned Variable's data will be of size TxBx*, where T is the length - of the longest sequence and B is the batch size. If ``batch_size`` is True, - the data will be transposed into BxTx* format. - - Batch elements will be ordered decreasingly by their length. - - Arguments: - sequence (PackedSequence): batch to pad - batch_first (bool, optional): if True, the output will be in BxTx* format. - - Returns: - Tuple of Variable containing the padded sequence, and a list of lengths - of each sequence in the batch. - """ - var_data, batch_sizes = sequence - max_batch_size = batch_sizes[0] - output = var_data.data.new(len(batch_sizes), max_batch_size, *var_data.size()[1:]).zero_() - output = Variable(output) - - lengths = [] - data_offset = 0 - prev_batch_size = batch_sizes[0] - for i, batch_size in enumerate(batch_sizes): - output[i, :batch_size] = var_data[data_offset:data_offset + batch_size] - data_offset += batch_size - - dec = prev_batch_size - batch_size - if dec > 0: - lengths.extend((i,) * dec) - prev_batch_size = batch_size - lengths.extend((i + 1,) * batch_size) - lengths.reverse() - - if batch_first: - output = output.transpose(0, 1) - return output, lengths
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/optim/adadelta.html b/docs/_modules/torch/optim/adadelta.html deleted file mode 100644 index f82ac915c42c..000000000000 --- a/docs/_modules/torch/optim/adadelta.html +++ /dev/null @@ -1,622 +0,0 @@ - - - - - - - - - - - torch.optim.adadelta — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.optim.adadelta

    -from .optimizer import Optimizer
    -
    -
    -
    [docs]class Adadelta(Optimizer): - """Implements Adadelta algorithm. - - It has been proposed in `ADADELTA: An Adaptive Learning Rate Method`__. - - Arguments: - params (iterable): iterable of parameters to optimize or dicts defining - parameter groups - rho (float, optional): coefficient used for computing a running average - of squared gradients (default: 0.9) - eps (float, optional): term added to the denominator to improve - numerical stability (default: 1e-6) - lr (float, optional): coefficient that scale delta before it is applied to the - parameters (default: 1.0) - weight_decay (float, optional): weight decay (L2 penalty) (default: 0) - - __ https://arxiv.org/abs/1212.5701 - """ - - def __init__(self, params, lr=1.0, rho=0.9, eps=1e-6, weight_decay=0): - defaults = dict(lr=lr, rho=rho, eps=eps, weight_decay=weight_decay) - super(Adadelta, self).__init__(params, defaults) - -
    [docs] def step(self, closure=None): - """Performs a single optimization step. - - Arguments: - closure (callable, optional): A closure that reevaluates the model - and returns the loss. - """ - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data - state = self.state[p] - - # State initialization - if len(state) == 0: - state['step'] = 0 - state['square_avg'] = grad.new().resize_as_(grad).zero_() - state['acc_delta'] = grad.new().resize_as_(grad).zero_() - - square_avg, acc_delta = state['square_avg'], state['acc_delta'] - rho, eps = group['rho'], group['eps'] - - state['step'] += 1 - - if group['weight_decay'] != 0: - grad = grad.add(group['weight_decay'], p.data) - - square_avg.mul_(rho).addcmul_(1 - rho, grad, grad) - std = square_avg.add(eps).sqrt_() - delta = acc_delta.add(eps).sqrt_().div_(std).mul_(grad) - p.data.add_(-group['lr'], delta) - acc_delta.mul_(rho).addcmul_(1 - rho, delta, delta) - - return loss
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/optim/adagrad.html b/docs/_modules/torch/optim/adagrad.html deleted file mode 100644 index 4b6f7dafd18f..000000000000 --- a/docs/_modules/torch/optim/adagrad.html +++ /dev/null @@ -1,641 +0,0 @@ - - - - - - - - - - - torch.optim.adagrad — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.optim.adagrad

    -import torch
    -
    -from .optimizer import Optimizer
    -
    -
    -
    [docs]class Adagrad(Optimizer): - """Implements Adagrad algorithm. - - It has been proposed in `Adaptive Subgradient Methods for Online Learning and Stochastic Optimization`_. - - Arguments: - params (iterable): iterable of parameters to optimize or dicts defining - parameter groups - lr (float, optional): learning rate (default: 1e-2) - lr_decay (float, optional): learning rate decay (default: 0) - weight_decay (float, optional): weight decay (L2 penalty) (default: 0) - - .. _Adaptive Subgradient Methods for Online Learning and Stochastic Optimization: - http://jmlr.org/papers/v12/duchi11a.html - """ - - def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0): - defaults = dict(lr=lr, lr_decay=lr_decay, weight_decay=weight_decay) - super(Adagrad, self).__init__(params, defaults) - - for group in self.param_groups: - for p in group['params']: - state = self.state[p] - state['step'] = 0 - state['sum'] = p.data.new().resize_as_(p.data).zero_() - - def share_memory(self): - for group in self.param_groups: - for p in group['params']: - state = self.state[p] - state['sum'].share_memory_() - -
    [docs] def step(self, closure=None): - """Performs a single optimization step. - - Arguments: - closure (callable, optional): A closure that reevaluates the model - and returns the loss. - """ - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data - state = self.state[p] - - state['step'] += 1 - - if group['weight_decay'] != 0: - if p.grad.data.is_sparse: - raise RuntimeError("weight_decay option is not compatible with sparse gradients ") - grad = grad.add(group['weight_decay'], p.data) - - clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay']) - - if p.grad.data.is_sparse: - grad_indices = grad.indices() - grad_values = grad.values() - size = torch.Size([x for x in grad.size()]) - - def make_sparse(values): - constructor = type(p.grad.data) - if grad_indices.dim() == 0 or values.dim() == 0: - return constructor() - return constructor(grad_indices, values, size) - state['sum'].add_(make_sparse(grad_values.pow(2))) - std = state['sum'].sparse_mask(grad) - std_values = std.values().sqrt_().add_(1e-10) - p.data.add_(-clr, make_sparse(grad_values / std_values)) - else: - state['sum'].addcmul_(1, grad, grad) - std = state['sum'].sqrt().add_(1e-10) - p.data.addcdiv_(-clr, grad, std) - - return loss
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/optim/adam.html b/docs/_modules/torch/optim/adam.html deleted file mode 100644 index efe59100e0ae..000000000000 --- a/docs/_modules/torch/optim/adam.html +++ /dev/null @@ -1,633 +0,0 @@ - - - - - - - - - - - torch.optim.adam — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.optim.adam

    -import math
    -from .optimizer import Optimizer
    -
    -
    -
    [docs]class Adam(Optimizer): - """Implements Adam algorithm. - - It has been proposed in `Adam: A Method for Stochastic Optimization`_. - - Arguments: - params (iterable): iterable of parameters to optimize or dicts defining - parameter groups - lr (float, optional): learning rate (default: 1e-3) - betas (Tuple[float, float], optional): coefficients used for computing - running averages of gradient and its square (default: (0.9, 0.999)) - eps (float, optional): term added to the denominator to improve - numerical stability (default: 1e-8) - weight_decay (float, optional): weight decay (L2 penalty) (default: 0) - - .. _Adam\: A Method for Stochastic Optimization: - https://arxiv.org/abs/1412.6980 - """ - - def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, - weight_decay=0): - defaults = dict(lr=lr, betas=betas, eps=eps, - weight_decay=weight_decay) - super(Adam, self).__init__(params, defaults) - -
    [docs] def step(self, closure=None): - """Performs a single optimization step. - - Arguments: - closure (callable, optional): A closure that reevaluates the model - and returns the loss. - """ - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data - state = self.state[p] - - # State initialization - if len(state) == 0: - state['step'] = 0 - # Exponential moving average of gradient values - state['exp_avg'] = grad.new().resize_as_(grad).zero_() - # Exponential moving average of squared gradient values - state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_() - - exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] - beta1, beta2 = group['betas'] - - state['step'] += 1 - - if group['weight_decay'] != 0: - grad = grad.add(group['weight_decay'], p.data) - - # Decay the first and second moment running average coefficient - exp_avg.mul_(beta1).add_(1 - beta1, grad) - exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) - - denom = exp_avg_sq.sqrt().add_(group['eps']) - - bias_correction1 = 1 - beta1 ** state['step'] - bias_correction2 = 1 - beta2 ** state['step'] - step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1 - - p.data.addcdiv_(-step_size, exp_avg, denom) - - return loss
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/optim/adamax.html b/docs/_modules/torch/optim/adamax.html deleted file mode 100644 index ab0e7c9129e7..000000000000 --- a/docs/_modules/torch/optim/adamax.html +++ /dev/null @@ -1,632 +0,0 @@ - - - - - - - - - - - torch.optim.adamax — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.optim.adamax

    -import torch
    -from .optimizer import Optimizer
    -
    -
    -
    [docs]class Adamax(Optimizer): - """Implements Adamax algorithm (a variant of Adam based on infinity norm). - - It has been proposed in `Adam: A Method for Stochastic Optimization`__. - - Arguments: - params (iterable): iterable of parameters to optimize or dicts defining - parameter groups - lr (float, optional): learning rate (default: 2e-3) - betas (Tuple[float, float], optional): coefficients used for computing - running averages of gradient and its square - eps (float, optional): term added to the denominator to improve - numerical stability (default: 1e-8) - weight_decay (float, optional): weight decay (L2 penalty) (default: 0) - - __ https://arxiv.org/abs/1412.6980 - """ - - def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, - weight_decay=0): - defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) - super(Adamax, self).__init__(params, defaults) - -
    [docs] def step(self, closure=None): - """Performs a single optimization step. - - Arguments: - closure (callable, optional): A closure that reevaluates the model - and returns the loss. - """ - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data - state = self.state[p] - - # State initialization - if len(state) == 0: - state['step'] = 0 - state['exp_avg'] = grad.new().resize_as_(grad).zero_() - state['exp_inf'] = grad.new().resize_as_(grad).zero_() - - exp_avg, exp_inf = state['exp_avg'], state['exp_inf'] - beta1, beta2 = group['betas'] - eps = group['eps'] - - state['step'] += 1 - - if group['weight_decay'] != 0: - grad = grad.add(group['weight_decay'], p.data) - - # Update biased first moment estimate. - exp_avg.mul_(beta1).add_(1 - beta1, grad) - # Update the exponentially weighted infinity norm. - norm_buf = torch.cat([ - exp_inf.mul_(beta2).unsqueeze(0), - grad.abs().add_(eps).unsqueeze_(0) - ], 0) - state['exp_inf'] = exp_inf = (torch.max(norm_buf, 0)[0]).squeeze_(0) - - bias_correction = 1 - beta1 ** state['step'] - clr = group['lr'] / bias_correction - - p.data.addcdiv_(-clr, exp_avg, exp_inf) - - return loss
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/optim/asgd.html b/docs/_modules/torch/optim/asgd.html deleted file mode 100644 index 35b9c0c22eaf..000000000000 --- a/docs/_modules/torch/optim/asgd.html +++ /dev/null @@ -1,632 +0,0 @@ - - - - - - - - - - - torch.optim.asgd — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.optim.asgd

    -import math
    -from .optimizer import Optimizer
    -
    -
    -
    [docs]class ASGD(Optimizer): - """Implements Averaged Stochastic Gradient Descent. - - It has been proposed in `Acceleration of stochastic approximation by averaging`_. - - Arguments: - params (iterable): iterable of parameters to optimize or dicts defining - parameter groups - lr (float, optional): learning rate (default: 1e-2) - lambd (float, optional): decay term (default: 1e-4) - alpha (float, optional): power for eta update (default: 0.75) - t0 (float, optional): point at which to start averaging (default: 1e6) - weight_decay (float, optional): weight decay (L2 penalty) (default: 0) - - .. _Acceleration of stochastic approximation by averaging: - http://dl.acm.org/citation.cfm?id=131098 - """ - - def __init__(self, params, lr=1e-2, lambd=1e-4, alpha=0.75, t0=1e6, weight_decay=0): - defaults = dict(lr=lr, lambd=lambd, alpha=alpha, t0=t0, - weight_decay=weight_decay) - super(ASGD, self).__init__(params, defaults) - -
    [docs] def step(self, closure=None): - """Performs a single optimization step. - - Arguments: - closure (callable, optional): A closure that reevaluates the model - and returns the loss. - """ - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data - state = self.state[p] - - # State initialization - if len(state) == 0: - state['step'] = 0 - state['eta'] = group['lr'] - state['mu'] = 1 - state['ax'] = grad.new().resize_as_(grad).zero_() - - state['step'] += 1 - - if group['weight_decay'] != 0: - grad = grad.add(group['weight_decay'], p.data) - - # decay term - p.data.mul_(1 - group['lambd'] * state['eta']) - - # update parameter - p.data.add_(-state['eta'], grad) - - # averaging - if state['mu'] != 1: - state['ax'].add_(p.data.sub(state['ax']).mul(state['mu'])) - else: - state['ax'].copy_(p.data) - - # update eta and mu - state['eta'] = (group['lr'] / - math.pow((1 + group['lambd'] * group['lr'] * state['step']), group['alpha'])) - state['mu'] = 1 / max(1, state['step'] - group['t0']) - - return loss
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/optim/lbfgs.html b/docs/_modules/torch/optim/lbfgs.html deleted file mode 100644 index 7244819cb2a7..000000000000 --- a/docs/_modules/torch/optim/lbfgs.html +++ /dev/null @@ -1,805 +0,0 @@ - - - - - - - - - - - torch.optim.lbfgs — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.optim.lbfgs

    -import torch
    -from functools import reduce
    -from .optimizer import Optimizer
    -
    -
    -
    [docs]class LBFGS(Optimizer): - """Implements L-BFGS algorithm. - - .. warning:: - This optimizer doesn't support per-parameter options and parameter - groups (there can be only one). - - .. warning:: - Right now all parameters have to be on a single device. This will be - improved in the future. - - .. note:: - This is a very memory intensive optimizer (it requires additional - ``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory - try reducing the history size, or use a different algorithm. - - Arguments: - lr (float): learning rate (default: 1) - max_iter (int): maximal number of iterations per optimization step - (default: 20) - max_eval (int): maximal number of function evaluations per optimization - step (default: max_iter * 1.25). - tolerance_grad (float): termination tolerance on first order optimality - (default: 1e-5). - tolerance_change (float): termination tolerance on function value/parameter - changes (default: 1e-9). - history_size (int): update history size (default: 100). 
- """ - - def __init__(self, params, lr=1, max_iter=20, max_eval=None, - tolerance_grad=1e-5, tolerance_change=1e-9, history_size=100, - line_search_fn=None): - if max_eval is None: - max_eval = max_iter * 5 // 4 - defaults = dict(lr=lr, max_iter=max_iter, max_eval=max_eval, - tolerance_grad=tolerance_grad, tolerance_change=tolerance_change, - history_size=history_size, line_search_fn=line_search_fn) - super(LBFGS, self).__init__(params, defaults) - - if len(self.param_groups) != 1: - raise ValueError("LBFGS doesn't support per-parameter options " - "(parameter groups)") - - self._params = self.param_groups[0]['params'] - self._numel_cache = None - - def _numel(self): - if self._numel_cache is None: - self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0) - return self._numel_cache - - def _gather_flat_grad(self): - views = [] - for p in self._params: - if p.grad is None: - view = p.data.new(p.data.numel()).zero_() - elif p.grad.data.is_sparse: - view = p.grad.data.to_dense().view(-1) - else: - view = p.grad.data.view(-1) - views.append(view) - return torch.cat(views, 0) - - def _add_grad(self, step_size, update): - offset = 0 - for p in self._params: - numel = p.numel() - p.data.add_(step_size, update[offset:offset + numel]) - offset += numel - assert offset == self._numel() - -
    [docs] def step(self, closure): - """Performs a single optimization step. - - Arguments: - closure (callable): A closure that reevaluates the model - and returns the loss. - """ - assert len(self.param_groups) == 1 - - group = self.param_groups[0] - lr = group['lr'] - max_iter = group['max_iter'] - max_eval = group['max_eval'] - tolerance_grad = group['tolerance_grad'] - tolerance_change = group['tolerance_change'] - line_search_fn = group['line_search_fn'] - history_size = group['history_size'] - - state = self.state['global_state'] - state.setdefault('func_evals', 0) - state.setdefault('n_iter', 0) - - # evaluate initial f(x) and df/dx - orig_loss = closure() - loss = orig_loss.data[0] - current_evals = 1 - state['func_evals'] += 1 - - flat_grad = self._gather_flat_grad() - abs_grad_sum = flat_grad.abs().sum() - - if abs_grad_sum <= tolerance_grad: - return loss - - # variables cached in state (for tracing) - d = state.get('d') - t = state.get('t') - old_dirs = state.get('old_dirs') - old_stps = state.get('old_stps') - H_diag = state.get('H_diag') - prev_flat_grad = state.get('prev_flat_grad') - prev_loss = state.get('prev_loss') - - n_iter = 0 - # optimize for a max of max_iter iterations - while n_iter < max_iter: - # keep track of nb of iterations - n_iter += 1 - state['n_iter'] += 1 - - ############################################################ - # compute gradient descent direction - ############################################################ - if state['n_iter'] == 1: - d = flat_grad.neg() - old_dirs = [] - old_stps = [] - H_diag = 1 - else: - # do lbfgs update (update memory) - y = flat_grad.sub(prev_flat_grad) - s = d.mul(t) - ys = y.dot(s) # y*s - if ys > 1e-10: - # updating memory - if len(old_dirs) == history_size: - # shift history by one (limited-memory) - old_dirs.pop(0) - old_stps.pop(0) - - # store new direction/step - old_dirs.append(s) - old_stps.append(y) - - # update scale of initial Hessian approximation - H_diag = ys / y.dot(y) # 
(y*y) - - # compute the approximate (L-BFGS) inverse Hessian - # multiplied by the gradient - num_old = len(old_dirs) - - if 'ro' not in state: - state['ro'] = [None] * history_size - state['al'] = [None] * history_size - ro = state['ro'] - al = state['al'] - - for i in range(num_old): - ro[i] = 1. / old_stps[i].dot(old_dirs[i]) - - # iteration in L-BFGS loop collapsed to use just one buffer - q = flat_grad.neg() - for i in range(num_old - 1, -1, -1): - al[i] = old_dirs[i].dot(q) * ro[i] - q.add_(-al[i], old_stps[i]) - - # multiply by initial Hessian - # r/d is the final direction - d = r = torch.mul(q, H_diag) - for i in range(num_old): - be_i = old_stps[i].dot(r) * ro[i] - r.add_(al[i] - be_i, old_dirs[i]) - - if prev_flat_grad is None: - prev_flat_grad = flat_grad.clone() - else: - prev_flat_grad.copy_(flat_grad) - prev_loss = loss - - ############################################################ - # compute step length - ############################################################ - # reset initial guess for step size - if state['n_iter'] == 1: - t = min(1., 1. 
/ abs_grad_sum) * lr - else: - t = lr - - # directional derivative - gtd = flat_grad.dot(d) # g * d - - # optional line search: user function - ls_func_evals = 0 - if line_search_fn is not None: - # perform line search, using user function - raise RuntimeError("line search function is not supported yet") - else: - # no line search, simply move with fixed-step - self._add_grad(t, d) - if n_iter != max_iter: - # re-evaluate function only if not in last iteration - # the reason we do this: in a stochastic setting, - # no use to re-evaluate that function here - loss = closure().data[0] - flat_grad = self._gather_flat_grad() - abs_grad_sum = flat_grad.abs().sum() - ls_func_evals = 1 - - # update func eval - current_evals += ls_func_evals - state['func_evals'] += ls_func_evals - - ############################################################ - # check conditions - ############################################################ - if n_iter == max_iter: - break - - if current_evals >= max_eval: - break - - if abs_grad_sum <= tolerance_grad: - break - - if gtd > -tolerance_change: - break - - if d.mul(t).abs_().sum() <= tolerance_change: - break - - if abs(loss - prev_loss) < tolerance_change: - break - - state['d'] = d - state['t'] = t - state['old_dirs'] = old_dirs - state['old_stps'] = old_stps - state['H_diag'] = H_diag - state['prev_flat_grad'] = prev_flat_grad - state['prev_loss'] = prev_loss - - return orig_loss
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/optim/optimizer.html b/docs/_modules/torch/optim/optimizer.html deleted file mode 100644 index 495e796e8901..000000000000 --- a/docs/_modules/torch/optim/optimizer.html +++ /dev/null @@ -1,702 +0,0 @@ - - - - - - - - - - - torch.optim.optimizer — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.optim.optimizer

    -from collections import defaultdict
    -
    -import torch
    -from copy import deepcopy
    -from itertools import chain
    -from torch.autograd import Variable
    -
    -required = object()
    -
    -
    -
    [docs]class Optimizer(object): - """Base class for all optimizers. - - Arguments: - params (iterable): an iterable of :class:`Variable` s or - :class:`dict` s. Specifies what Variables should be optimized. - defaults: (dict): a dict containing default values of optimization - options (used when a parameter group doesn't specify them). - """ - - def __init__(self, params, defaults): - if isinstance(params, Variable) or torch.is_tensor(params): - raise TypeError("params argument given to the optimizer should be " - "an iterable of Variables or dicts, but got " + - torch.typename(params)) - - self.state = defaultdict(dict) - self.param_groups = list(params) - if len(self.param_groups) == 0: - raise ValueError("optimizer got an empty parameter list") - if not isinstance(self.param_groups[0], dict): - self.param_groups = [{'params': self.param_groups}] - - param_set = set() - for group in self.param_groups: - group['params'] = list(group['params']) - group_set = set(group['params']) - if not param_set.isdisjoint(group_set): - raise ValueError("some parameters appear in more than one " - "parameter group") - param_set.update(group_set) - - for name, default in defaults.items(): - for i, group in enumerate(self.param_groups): - if default is required and name not in group: - raise ValueError("parameter group " + str(i) + " didn't " - "specify a value of required optimization parameter " + - name) - else: - group.setdefault(name, default) - - for group in self.param_groups: - for param in group['params']: - if not isinstance(param, Variable): - raise TypeError("optimizer can only optimize Variables, " - "but one of the params is " + torch.typename(param)) - if not param.requires_grad: - raise ValueError("optimizing a parameter that doesn't " - "require gradients") - if param.creator is not None: - raise ValueError("can't optimize a non-leaf Variable") - - def __getstate__(self): - return { - 'state': self.state, - 'param_groups': self.param_groups, - } - - def 
__setstate__(self, state): - self.__dict__.update(state) - -
    [docs] def state_dict(self): - """Returns the state of the optimizer as a :class:`dict`. - - It contains two entries: - - * state - a dict holding current optimization state. Its content - differs between optimizer classes. - * param_groups - a dict containig all parameter groups - """ - # Save ids instead of Variables - def pack_group(group): - packed = {k: v for k, v in group.items() if k != 'params'} - packed['params'] = [id(p) for p in group['params']] - return packed - param_groups = [pack_group(g) for g in self.param_groups] - # Remap state to use ids as keys - packed_state = {(id(k) if isinstance(k, Variable) else k): v - for k, v in self.state.items()} - return { - 'state': packed_state, - 'param_groups': param_groups, - }
    - -
    [docs] def load_state_dict(self, state_dict): - """Loads the optimizer state. - - Arguments: - state_dict (dict): optimizer state. Should be an object returned - from a call to :meth:`state_dict`. - """ - # deepcopy, to be consistent with module API - state_dict = deepcopy(state_dict) - # Validate the state_dict - groups = self.param_groups - saved_groups = state_dict['param_groups'] - - if len(groups) != len(saved_groups): - raise ValueError("loaded state dict has a different number of " - "parameter groups") - param_lens = (len(g['params']) for g in groups) - saved_lens = (len(g['params']) for g in saved_groups) - if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)): - raise ValueError("loaded state dict contains a parameter group " - "that doesn't match the size of optimizer's group") - - # Update the state - id_map = {old_id: p for old_id, p in - zip(chain(*(g['params'] for g in saved_groups)), - chain(*(g['params'] for g in groups)))} - state = {id_map.get(k, k): v for k, v in state_dict['state'].items()} - - # Update parameter groups, setting their 'params' value - def update_group(group, new_group): - new_group['params'] = group['params'] - return new_group - param_groups = [ - update_group(g, ng) for g, ng in zip(groups, saved_groups)] - self.__setstate__({'state': state, 'param_groups': param_groups})
    - -
    [docs] def zero_grad(self): - """Clears the gradients of all optimized :class:`Variable` s.""" - for group in self.param_groups: - for param in group['params']: - if param.grad is not None: - param.grad.data.zero_()
    - -
    [docs] def step(self, closure): - """Performs a single optimization step (parameter update). - - Arguments: - closure (callable): A closure that reevaluates the model and - returns the loss. Optional for most optimizers. - """ - raise NotImplementedError
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/optim/rmsprop.html b/docs/_modules/torch/optim/rmsprop.html deleted file mode 100644 index d9143aa16da8..000000000000 --- a/docs/_modules/torch/optim/rmsprop.html +++ /dev/null @@ -1,644 +0,0 @@ - - - - - - - - - - - torch.optim.rmsprop — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.optim.rmsprop

    -from .optimizer import Optimizer
    -
    -
    -
    [docs]class RMSprop(Optimizer): - """Implements RMSprop algorithm. - - Proposed by G. Hinton in his `course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_. - - The centered version first appears in `Generating Sequences - With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_. - - Arguments: - params (iterable): iterable of parameters to optimize or dicts defining - parameter groups - lr (float, optional): learning rate (default: 1e-2) - momentum (float, optional): momentum factor (default: 0) - alpha (float, optional): smoothing constant (default: 0.99) - eps (float, optional): term added to the denominator to improve - numerical stability (default: 1e-8) - centered (bool, optional) : if True, compute the centered RMSProp, - the gradient is normalized by an estimation of its variance - weight_decay (float, optional): weight decay (L2 penalty) (default: 0) - - """ - - def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False): - defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay) - super(RMSprop, self).__init__(params, defaults) - - def __setstate__(self, state): - super(RMSprop, self).__setstate__(state) - for group in self.param_groups: - group.setdefault('momentum', 0) - group.setdefault('centered', False) - -
    [docs] def step(self, closure=None): - """Performs a single optimization step. - - Arguments: - closure (callable, optional): A closure that reevaluates the model - and returns the loss. - """ - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data - state = self.state[p] - - # State initialization - if len(state) == 0: - state['step'] = 0 - state['square_avg'] = grad.new().resize_as_(grad).zero_() - if group['momentum'] > 0: - state['momentum_buffer'] = grad.new().resize_as_(grad).zero_() - if group['centered']: - state['grad_avg'] = grad.new().resize_as_(grad).zero_() - - square_avg = state['square_avg'] - alpha = group['alpha'] - - state['step'] += 1 - - if group['weight_decay'] != 0: - grad = grad.add(group['weight_decay'], p.data) - - square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad) - - if group['centered']: - grad_avg = state['grad_avg'] - grad_avg.mul_(alpha).add_(1 - alpha, grad) - avg = square_avg.addcmul(-1, grad_avg, grad_avg).sqrt().add_(group['eps']) - else: - avg = square_avg.sqrt().add_(group['eps']) - - if group['momentum'] > 0: - buf = state['momentum_buffer'] - buf.mul_(group['momentum']).addcdiv_(grad, avg) - p.data.add_(-group['lr'], buf) - else: - p.data.addcdiv_(-group['lr'], grad, avg) - - return loss
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/optim/rprop.html b/docs/_modules/torch/optim/rprop.html deleted file mode 100644 index 1093e95dceba..000000000000 --- a/docs/_modules/torch/optim/rprop.html +++ /dev/null @@ -1,627 +0,0 @@ - - - - - - - - - - - torch.optim.rprop — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.optim.rprop

    -import math
    -from .optimizer import Optimizer
    -
    -
    -
    [docs]class Rprop(Optimizer): - """Implements the resilient backpropagation algorithm. - - Arguments: - params (iterable): iterable of parameters to optimize or dicts defining - parameter groups - lr (float, optional): learning rate (default: 1e-2) - etas (Tuple[float, float], optional): pair of (etaminus, etaplis), that - are multiplicative increase and decrease factors (default: (0.5, 1.2)) - step_sizes (Tuple[float, float], optional): a pair of minimal and - maximal allowed step sizes (default: (1e-6, 50)) - """ - - def __init__(self, params, lr=1e-2, etas=(0.5, 1.2), step_sizes=(1e-6, 50)): - defaults = dict(lr=lr, etas=etas, step_sizes=step_sizes) - super(Rprop, self).__init__(params, defaults) - -
    [docs] def step(self, closure=None): - """Performs a single optimization step. - - Arguments: - closure (callable, optional): A closure that reevaluates the model - and returns the loss. - """ - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - for p in group['params']: - if p.grad is None: - continue - grad = p.grad.data - state = self.state[p] - - # State initialization - if len(state) == 0: - state['step'] = 0 - state['prev'] = grad.new().resize_as_(grad).zero_() - state['step_size'] = grad.new().resize_as_(grad).fill_(group['lr']) - - etaminus, etaplus = group['etas'] - step_size_min, step_size_max = group['step_sizes'] - step_size = state['step_size'] - - state['step'] += 1 - - sign = grad.mul(state['prev']).sign() - sign[sign.gt(0)] = etaplus - sign[sign.lt(0)] = etaminus - sign[sign.eq(0)] = 1 - - # update stepsizes with step size updates - step_size.mul_(sign).clamp_(step_size_min, step_size_max) - - # for dir<0, dfdx=0 - # for dir>=0 dfdx=dfdx - grad = grad.clone() - grad[sign.eq(etaminus)] = 0 - - # update parameters - p.data.addcmul_(-1, grad.sign(), step_size) - - state['prev'].copy_(grad) - - return loss
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/optim/sgd.html b/docs/_modules/torch/optim/sgd.html deleted file mode 100644 index 85ebd129ce69..000000000000 --- a/docs/_modules/torch/optim/sgd.html +++ /dev/null @@ -1,635 +0,0 @@ - - - - - - - - - - - torch.optim.sgd — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.optim.sgd

    -from .optimizer import Optimizer, required
    -
    -
    -
    [docs]class SGD(Optimizer): - """Implements stochastic gradient descent (optionally with momentum). - - Nesterov momentum is based on the formula from - `On the importance of initialization and momentum in deep learning`__. - - Args: - params (iterable): iterable of parameters to optimize or dicts defining - parameter groups - lr (float): learning rate - momentum (float, optional): momentum factor (default: 0) - weight_decay (float, optional): weight decay (L2 penalty) (default: 0) - dampening (float, optional): dampening for momentum (default: 0) - nesterov (bool, optional): enables Nesterov momentum (default: False) - - Example: - >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) - >>> optimizer.zero_grad() - >>> loss_fn(model(input), target).backward() - >>> optimizer.step() - - __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf - """ - - def __init__(self, params, lr=required, momentum=0, dampening=0, - weight_decay=0, nesterov=False): - defaults = dict(lr=lr, momentum=momentum, dampening=dampening, - weight_decay=weight_decay, nesterov=nesterov) - if nesterov and (momentum <= 0 or dampening != 0): - raise ValueError("Nesterov momentum requires a momentum and zero dampening") - super(SGD, self).__init__(params, defaults) - - def __setstate__(self, state): - super(SGD, self).__setstate__(state) - for group in self.param_groups: - group.setdefault('nesterov', False) - -
    [docs] def step(self, closure=None): - """Performs a single optimization step. - - Arguments: - closure (callable, optional): A closure that reevaluates the model - and returns the loss. - """ - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - weight_decay = group['weight_decay'] - momentum = group['momentum'] - dampening = group['dampening'] - nesterov = group['nesterov'] - - for p in group['params']: - if p.grad is None: - continue - d_p = p.grad.data - if weight_decay != 0: - d_p.add_(weight_decay, p.data) - if momentum != 0: - param_state = self.state[p] - if 'momentum_buffer' not in param_state: - buf = param_state['momentum_buffer'] = d_p.clone() - else: - buf = param_state['momentum_buffer'] - buf.mul_(momentum).add_(1 - dampening, d_p) - if nesterov: - d_p = d_p.add(momentum, buf) - else: - d_p = buf - - p.data.add_(-group['lr'], d_p) - - return loss
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/serialization.html b/docs/_modules/torch/serialization.html deleted file mode 100644 index 2256139ab099..000000000000 --- a/docs/_modules/torch/serialization.html +++ /dev/null @@ -1,944 +0,0 @@ - - - - - - - - - - - torch.serialization — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.serialization

    -import difflib
    -import inspect
    -import os
    -import shutil
    -import struct
    -import sys
    -import torch
    -import tarfile
    -import tempfile
    -import warnings
    -from contextlib import closing, contextmanager
    -from ._utils import _import_dotted_name
    -if sys.version_info[0] == 2:
    -    import cPickle as pickle
    -else:
    -    import pickle
    -
    -DEFAULT_PROTOCOL = 2
    -
    -LONG_SIZE = struct.Struct('=l').size
    -INT_SIZE = struct.Struct('=i').size
    -SHORT_SIZE = struct.Struct('=h').size
    -
    -MAGIC_NUMBER = 0x1950a86a20f9469cfc6c
    -PROTOCOL_VERSION = 1001
    -STORAGE_KEY_SEPARATOR = ','
    -
    -
    -class SourceChangeWarning(Warning):
    -    pass
    -
    -
    -@contextmanager
    -def mkdtemp():
    -    path = tempfile.mkdtemp()
    -    yield path
    -    shutil.rmtree(path)
    -
    -
    -_package_registry = []
    -
    -
    -def register_package(priority, tagger, deserializer):
    -    queue_elem = (priority, tagger, deserializer)
    -    _package_registry.append(queue_elem)
    -    _package_registry.sort()
    -
    -
    -def _cpu_tag(obj):
    -    if type(obj).__module__ == 'torch':
    -        return 'cpu'
    -
    -
    -def _cuda_tag(obj):
    -    if type(obj).__module__ == 'torch.cuda':
    -        return 'cuda:' + str(obj.get_device())
    -
    -
    -def _cpu_deserialize(obj, location):
    -    if location == 'cpu':
    -        return obj
    -
    -
    -def _cuda_deserialize(obj, location):
    -    if location.startswith('cuda'):
    -        device_id = max(int(location[5:]), 0)
    -        return obj.cuda(device_id)
    -
    -
    -register_package(10, _cpu_tag, _cpu_deserialize)
    -register_package(20, _cuda_tag, _cuda_deserialize)
    -
    -
    -def location_tag(storage):
    -    for _, tagger, _ in _package_registry:
    -        location = tagger(storage)
    -        if location:
    -            return location
    -    raise RuntimeError("don't know how to determine data location of " +
    -                       torch.typename(storage))
    -
    -
    -def default_restore_location(storage, location):
    -    for _, _, fn in _package_registry:
    -        result = fn(storage, location)
    -        if result is not None:
    -            return result
    -    raise RuntimeError("don't know how to restore data location of " +
    -                       torch.typename(storage) + " (tagged with " +
    -                       location + ")")
    -
    -
    -def normalize_storage_type(storage_type):
    -    return getattr(torch, storage_type.__name__)
    -
    -
    -def storage_to_tensor_type(storage):
    -    storage_type = type(storage)
    -    module = _import_dotted_name(storage_type.__module__)
    -    return getattr(module, storage_type.__name__.replace('Storage', 'Tensor'))
    -
    -
    -
    [docs]def save(obj, f, pickle_module=pickle, pickle_protocol=DEFAULT_PROTOCOL): - """Saves an object to a disk file. - - See also: :ref:`recommend-saving-models` - - Args: - obj: saved object - f: a file-like object (has to implement fileno that returns a file descriptor) - or a string containing a file name - pickle_module: module used for pickling metadata and objects - pickle_protocol: can be specified to override the default protocol - """ - new_fd = False - if isinstance(f, str) or (sys.version_info[0] == 2 and isinstance(f, unicode)): - new_fd = True - f = open(f, "wb") - try: - return _save(obj, f, pickle_module, pickle_protocol) - finally: - if new_fd: - f.close()
    - - -def _save(obj, f, pickle_module, pickle_protocol): - import torch.nn as nn - serialized_container_types = {} - serialized_storages = {} - - def persistent_id(obj): - # FIXME: the docs say that persistent_id should only return a string - # but torch store returns tuples. This works only in the binary protocol - # see - # https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects - # https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537 - if isinstance(obj, type) and issubclass(obj, nn.Module): - if obj in serialized_container_types: - return None - serialized_container_types[obj] = True - source_file = source = None - try: - source_file = inspect.getsourcefile(obj) - source = inspect.getsource(obj) - except (TypeError, IOError): - warnings.warn("Couldn't retrieve source code for container of " - "type " + obj.__name__ + ". It won't be checked " - "for correctness upon loading.") - return ('module', obj, source_file, source) - elif torch.is_storage(obj): - storage_type = normalize_storage_type(type(obj)) - root, offset = obj._root_storage() - root_key = str(root._cdata) - location = location_tag(obj) - serialized_storages[root_key] = root - is_view = obj._cdata != root._cdata - if is_view: - view_metadata = (str(obj._cdata), offset, obj.size()) - else: - view_metadata = None - - return ('storage', - storage_type, - root_key, - location, - root.size(), - view_metadata) - - return None - - sys_info = dict( - protocol_version=PROTOCOL_VERSION, - little_endian=sys.byteorder == 'little', - type_sizes=dict( - short=SHORT_SIZE, - int=INT_SIZE, - long=LONG_SIZE, - ), - ) - - pickle_module.dump(MAGIC_NUMBER, f, protocol=pickle_protocol) - pickle_module.dump(PROTOCOL_VERSION, f, protocol=pickle_protocol) - pickle_module.dump(sys_info, f, protocol=pickle_protocol) - pickler = pickle_module.Pickler(f, protocol=pickle_protocol) - pickler.persistent_id = persistent_id - pickler.dump(obj) - - serialized_storage_keys = 
sorted(serialized_storages.keys()) - pickle_module.dump(serialized_storage_keys, f, protocol=pickle_protocol) - f.flush() - for key in serialized_storage_keys: - serialized_storages[key]._write_file(f) - - -
    [docs]def load(f, map_location=None, pickle_module=pickle): - """Loads an object saved with :func:`torch.save` from a file. - - torch.load can dynamically remap storages to be loaded on a different device - using the map_location argument. If it's a callable, it will be called with - two arguments: storage and location tag. It's expected to either return a - storage that's been moved to a different location, or None (and the location - will be resolved using the default method). If this argument is a dict it's - expected to be a mapping from location tags used in a file, to location - tags of the current system. - - By default the location tags are 'cpu' for host tensors and 'cuda:device_id' - (e.g. 'cuda:2') for cuda tensors. User extensions can register their own - tagging and deserialization methods using register_package. - - Args: - f: a file-like object (has to implement fileno that returns a file descriptor, - and must implement seek), or a string containing a file name - map_location: a function or a dict specifying how to remap storage locations - pickle_module: module used for unpickling metadata and objects (has to match - the pickle_module used to serialize file) - - Example: - >>> torch.load('tensors.pt') - # Load all tensors onto the CPU - >>> torch.load('tensors.pt', map_location=lambda storage, loc: storage) - # Map tensors from GPU 1 to GPU 0 - >>> torch.load('tensors.pt', map_location={'cuda:1':'cuda:0'}) - """ - new_fd = False - if isinstance(f, str) or (sys.version_info[0] == 2 and isinstance(f, unicode)): - new_fd = True - f = open(f, 'rb') - try: - return _load(f, map_location, pickle_module) - finally: - if new_fd: - f.close()
    - - -def _load(f, map_location, pickle_module): - deserialized_objects = {} - - if map_location is None: - restore_location = default_restore_location - elif isinstance(map_location, dict): - def restore_location(storage, location): - location = map_location.get(location, location) - return default_restore_location(storage, location) - else: - def restore_location(storage, location): - result = map_location(storage, location) - if result is None: - result = default_restore_location(storage, location) - return result - - def _check_container_source(container_type, source_file, original_source): - current_source = inspect.getsource(container_type) - if original_source != current_source: - if container_type.dump_patches: - file_name = container_type.__name__ + '.patch' - diff = difflib.unified_diff(current_source.split('\n'), - original_source.split('\n'), - source_file, - source_file, lineterm="") - lines = '\n'.join(diff) - try: - with open(file_name, 'a+') as f: - file_size = f.seek(0, 2) - f.seek(0) - if file_size == 0: - f.write(lines) - elif file_size != len(lines) or f.read() != lines: - raise IOError - msg = ("Saved a reverse patch to " + file_name + ". " - "Run `patch -p0 < " + file_name + "` to revert your " - "changes.") - except IOError: - msg = ("Tried to save a patch, but couldn't create a " - "writable file " + file_name + ". Make sure it " - "doesn't exist and your working directory is " - "writable.") - else: - msg = ("you can retrieve the original source code by " - "accessing the object's source attribute or set " - "`torch.nn.Module.dump_patches = True` and use the " - "patch tool to revert the changes.") - msg = ("source code of class '{}' has changed. 
{}" - .format(torch.typename(container_type), msg)) - warnings.warn(msg, SourceChangeWarning) - - def legacy_load(f): - deserialized_objects = {} - - def persistent_load(saved_id): - if isinstance(saved_id, tuple): - # Ignore containers that don't have any sources saved - if all(saved_id[1:]): - _check_container_source(*saved_id) - return saved_id[0] - return deserialized_objects[int(saved_id)] - - with closing(tarfile.open(fileobj=f, mode='r:', format=tarfile.PAX_FORMAT)) as tar, \ - mkdtemp() as tmpdir: - - tar.extract('storages', path=tmpdir) - with open(os.path.join(tmpdir, 'storages'), 'rb', 0) as f: - num_storages = pickle_module.load(f) - for i in range(num_storages): - args = pickle_module.load(f) - key, location, storage_type = args - obj = storage_type._new_with_file(f) - obj = restore_location(obj, location) - deserialized_objects[key] = obj - - storage_views = pickle_module.load(f) - for target_cdata, root_cdata, offset, size in storage_views: - root = deserialized_objects[root_cdata] - deserialized_objects[target_cdata] = root[offset:offset + size] - - tar.extract('tensors', path=tmpdir) - with open(os.path.join(tmpdir, 'tensors'), 'rb', 0) as f: - num_tensors = pickle_module.load(f) - for i in range(num_tensors): - args = pickle_module.load(f) - key, storage_id, original_tensor_type = args - storage = deserialized_objects[storage_id] - tensor_type = storage_to_tensor_type(storage) - tensor = tensor_type._new_with_metadata_file(f, storage) - deserialized_objects[key] = tensor - - pickle_file = tar.extractfile('pickle') - unpickler = pickle_module.Unpickler(pickle_file) - unpickler.persistent_load = persistent_load - result = unpickler.load() - return result - - deserialized_objects = {} - - def persistent_load(saved_id): - assert isinstance(saved_id, tuple) - typename = saved_id[0] - data = saved_id[1:] - - if typename == 'module': - # Ignore containers that don't have any sources saved - if all(data[1:]): - _check_container_source(*data) - return 
data[0] - elif typename == 'storage': - data_type, root_key, location, size, view_metadata = data - if root_key not in deserialized_objects: - deserialized_objects[root_key] = restore_location( - data_type(size), location) - storage = deserialized_objects[root_key] - if view_metadata is not None: - view_key, offset, view_size = view_metadata - if view_key not in deserialized_objects: - deserialized_objects[view_key] = storage[offset:offset + view_size] - return deserialized_objects[view_key] - else: - return storage - else: - raise RuntimeError("Unknown saved id type: %s" % saved_id[0]) - - # try the legacy loader first, which only works if f is a tarfile - try: - return legacy_load(f) - except tarfile.TarError: - pass - - f.seek(0) - magic_number = pickle_module.load(f) - if magic_number != MAGIC_NUMBER: - raise RuntimeError("Invalid magic number; corrupt file?") - protocol_version = pickle_module.load(f) - if protocol_version != PROTOCOL_VERSION: - raise RuntimeError("Invalid protocol version: %s" % protocol_version) - - _sys_info = pickle_module.load(f) - unpickler = pickle_module.Unpickler(f) - unpickler.persistent_load = persistent_load - result = unpickler.load() - - deserialized_storage_keys = pickle_module.load(f) - - offset = f.tell() - for key in deserialized_storage_keys: - assert key in deserialized_objects - deserialized_objects[key]._set_from_file(f, offset) - offset = None - - return result -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/storage.html b/docs/_modules/torch/storage.html deleted file mode 100644 index 6fa689ae39c1..000000000000 --- a/docs/_modules/torch/storage.html +++ /dev/null @@ -1,663 +0,0 @@ - - - - - - - - - - - torch.storage — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.storage

    -import torch
    -from ._utils import _type, _cuda, _range
    -
    -
    -class _StorageBase(object):
    -    is_cuda = False
    -    is_sparse = False
    -
    -    def __str__(self):
    -        content = ' ' + '\n '.join(str(self[i]) for i in _range(len(self)))
    -        return content + '\n[{} of size {}]'.format(torch.typename(self), len(self))
    -
    -    def __repr__(self):
    -        return str(self)
    -
    -    def __iter__(self):
    -        return iter(map(lambda i: self[i], _range(self.size())))
    -
    -    def __copy__(self):
    -        return self.clone()
    -
    -    def __deepcopy__(self, memo):
    -        memo = memo.setdefault('torch', {})
    -        if self._cdata in memo:
    -            return memo[self._cdata]
    -        new_storage = self.clone()
    -        memo[self._cdata] = new_storage
    -        return new_storage
    -
    -    def __reduce__(self):
    -        return type(self), (self.tolist(),)
    -
    -    def clone(self):
    -        """Returns a copy of this storage"""
    -        return type(self)(self.size()).copy_(self)
    -
    -    def tolist(self):
    -        """Returns a list containing the elements of this storage"""
    -        return [v for v in self]
    -
    -    def cpu(self):
    -        """Returns a CPU copy of this storage if it's not already on the CPU"""
    -        return self.type(getattr(torch, self.__class__.__name__))
    -
    -    def double(self):
    -        """Casts this storage to double type"""
    -        return self.type(type(self).__module__ + '.DoubleStorage')
    -
    -    def float(self):
    -        """Casts this storage to float type"""
    -        return self.type(type(self).__module__ + '.FloatStorage')
    -
    -    def half(self):
    -        """Casts this storage to half type"""
    -        return self.type(type(self).__module__ + '.HalfStorage')
    -
    -    def long(self):
    -        """Casts this storage to long type"""
    -        return self.type(type(self).__module__ + '.LongStorage')
    -
    -    def int(self):
    -        """Casts this storage to int type"""
    -        return self.type(type(self).__module__ + '.IntStorage')
    -
    -    def short(self):
    -        """Casts this storage to short type"""
    -        return self.type(type(self).__module__ + '.ShortStorage')
    -
    -    def char(self):
    -        """Casts this storage to char type"""
    -        return self.type(type(self).__module__ + '.CharStorage')
    -
    -    def byte(self):
    -        """Casts this storage to byte type"""
    -        return self.type(type(self).__module__ + '.ByteStorage')
    -
    -    def pin_memory(self):
    -        """Copies the storage to pinned memory, if it's not already pinned."""
    -        if self.is_cuda:
    -            raise TypeError("cannot pin '{0}' only CPU memory can be pinned"
    -                            .format(self.type()))
    -        import torch.cuda
    -        allocator = torch.cuda._host_allocator()
    -        return type(self)(self.size(), allocator=allocator).copy_(self)
    -
    -    def share_memory_(self):
    -        """Moves the storage to shared memory.
    -
    -        This is a no-op for storages already in shared memory and for CUDA
    -        storages, which do not need to be moved for sharing across processes.
    -        Storages in shared memory cannot be resized.
    -
    -        Returns: self
    -        """
    -        from torch.multiprocessing import get_sharing_strategy
    -        if self.is_cuda:
    -            pass  # CUDA doesn't use POSIX shared memory
    -        elif get_sharing_strategy() == 'file_system':
    -            self._share_filename_()
    -        else:
    -            self._share_fd_()
    -        return self
    -
    -
    -_StorageBase.type = _type
    -_StorageBase.cuda = _cuda
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/tensor.html b/docs/_modules/torch/tensor.html deleted file mode 100644 index 8a80b14517dc..000000000000 --- a/docs/_modules/torch/tensor.html +++ /dev/null @@ -1,1005 +0,0 @@ - - - - - - - - - - - torch.tensor — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.tensor

    -import torch
    -from . import _tensor_str
    -from ._utils import _type, _cuda, _range, _rebuild_tensor
    -import sys
    -
    -
    -class _TensorBase(object):
    -    #: bool: True if this is a CUDA tensor
    -    is_cuda = False
    -    is_sparse = False
    -
    -    def new(self, *args, **kwargs):
    -        """Constructs a new tensor of the same data type."""
    -        return self.__class__(*args, **kwargs)
    -
    -    def type_as(self, tensor):
    -        """Returns this tensor cast to the type of the given tensor.
    -
    -        This is a no-op if the tensor is already of the correct type. This is
    -        equivalent to::
    -
    -            self.type(tensor.type())
    -
    -        Params:
    -            tensor (Tensor): the tensor which has the desired type
    -        """
    -        return self.type(tensor.type())
    -
    -    def cpu(self):
    -        """Returns a CPU copy of this tensor if it's not already on the CPU"""
    -        return self.type(getattr(torch, self.__class__.__name__))
    -
    -    def double(self):
    -        """Casts this tensor to double type"""
    -        return self.type(type(self).__module__ + '.DoubleTensor')
    -
    -    def float(self):
    -        """Casts this tensor to float type"""
    -        return self.type(type(self).__module__ + '.FloatTensor')
    -
    -    def half(self):
    -        """Casts this tensor to half-precision float type"""
    -        return self.type(type(self).__module__ + '.HalfTensor')
    -
    -    def long(self):
    -        """Casts this tensor to long type"""
    -        return self.type(type(self).__module__ + '.LongTensor')
    -
    -    def int(self):
    -        """Casts this tensor to int type"""
    -        return self.type(type(self).__module__ + '.IntTensor')
    -
    -    def short(self):
    -        """Casts this tensor to short type"""
    -        return self.type(type(self).__module__ + '.ShortTensor')
    -
    -    def char(self):
    -        """Casts this tensor to char type"""
    -        return self.type(type(self).__module__ + '.CharTensor')
    -
    -    def byte(self):
    -        """Casts this tensor to byte type"""
    -        return self.type(type(self).__module__ + '.ByteTensor')
    -
    -    def is_pinned(self):
    -        """Returns true if this tensor resides in pinned memory"""
    -        storage = self.storage()
    -        return storage.is_pinned() if storage else False
    -
    -    def pin_memory(self):
    -        """Copies the tensor to pinned memory, if it's not already pinned."""
    -        if self.is_cuda:
    -            raise TypeError("cannot pin '{0}' only CPU memory can be pinned"
    -                            .format(self.type()))
    -        storage = self.storage()
    -        if storage is None:
    -            storage = (self.storage_type())()
    -        return type(self)().set_(storage.pin_memory()).view_as(self)
    -
    -    def share_memory_(self):
    -        """Moves the underlying storage to shared memory.
    -
    -        This is a no-op if the underlying storage is already in shared memory
    -        and for CUDA tensors. Tensors in shared memory cannot be resized.
    -        """
    -        self.storage().share_memory_()
    -        return self
    -
    -    def is_shared(self):
    -        """Checks if tensor is in shared memory.
    -
    -        This is always ``True`` for CUDA tensors.
    -        """
    -        return self.storage().is_shared()
    -
    -    def __deepcopy__(self, _memo):
    -        memo = _memo.setdefault('torch', {})
    -        if self._cdata in memo:
    -            return memo[self._cdata]
    -        new_storage = self.storage().__deepcopy__(_memo)
    -        new_tensor = self.new()
    -        new_tensor.set_(new_storage, self.storage_offset(), self.size(), self.stride())
    -        memo[self._cdata] = new_tensor
    -        return new_tensor
    -
    -    def __reduce__(self):
    -        # NOTE: _rebuild_tensor does not call __setstate__
    -        args = self.__getstate__()
    -        return (_rebuild_tensor, args)
    -
    -    def __getstate__(self):
    -        return (self.storage(),
    -                self.storage_offset(),
    -                tuple(self.size()),
    -                self.stride())
    -
    -    def __setstate__(self, state):
    -        self.set_(*state)
    -
    -    def __repr__(self):
    -        return str(self)
    -
    -    def __str__(self):
    -        # All strings are unicode in Python 3, while we have to encode unicode
    -        # strings in Python2. If we can't, let python decide the best
    -        # characters to replace unicode characters with.
    -        if sys.version_info > (3,):
    -            return _tensor_str._str(self)
    -        else:
    -            if hasattr(sys.stdout, 'encoding'):
    -                return _tensor_str._str(self).encode(
    -                    sys.stdout.encoding or 'UTF-8', 'replace')
    -            else:
    -                return _tensor_str._str(self).encode('UTF-8', 'replace')
    -
    -    def __bool__(self):
    -        if self.numel() == 0:
    -            return False
    -        raise RuntimeError("bool value of non-empty " + torch.typename(self) +
    -                           " objects is ambiguous")
    -
    -    __nonzero__ = __bool__
    -
    -    def __iter__(self):
    -        return iter(map(lambda i: self.select(0, i), _range(self.size(0))))
    -
    -    def split(self, split_size, dim=0):
    -        """Splits this tensor into a tuple of tensors.
    -
    -        See :func:`torch.split`.
    -        """
    -        return torch.split(self, split_size, dim)
    -
    -    def chunk(self, n_chunks, dim=0):
    -        """Splits this tensor into a tuple of tensors.
    -
    -        See :func:`torch.chunk`.
    -        """
    -        return torch.chunk(self, n_chunks, dim)
    -
    -    def tolist(self):
    -        """Returns a nested list represenation of this tensor."""
    -        dim = self.dim()
    -        if dim == 1:
    -            return [v for v in self]
    -        elif dim > 0:
    -            return [subt.tolist() for subt in self]
    -        return []
    -
    -    def view_as(self, tensor):
    -        """Returns this tensor viewed as the size as the specified tensor.
    -
    -        This is equivalent to::
    -
    -                self.view(tensor.size())
    -        """
    -        return self.view(tensor.size())
    -
    -    def permute(self, *dims):
    -        """Permute the dimensions of this tensor.
    -
    -        Args:
    -            *dims (int...): The desired ordering of dimensions
    -
    -        Example:
    -            >>> x = torch.randn(2, 3, 5)
    -            >>> x.size()
    -            torch.Size([2, 3, 5])
    -            >>> x.permute(2, 0, 1).size()
    -            torch.Size([5, 2, 3])
    -        """
    -        perm = list(dims)
    -        tensor = self
    -        n_dims = tensor.dim()
    -        assert len(perm) == n_dims, 'Invalid permutation'
    -        for i, p in enumerate(perm):
    -            if p != i and p != -1:
    -                j = i
    -                while True:
    -                    assert 0 <= perm[j] and perm[j] < n_dims, 'Invalid permutation'
    -                    tensor = tensor.transpose(j, perm[j])
    -                    perm[j], j = -1, perm[j]
    -                    if perm[j] == i:
    -                        break
    -                perm[j] = -1
    -        return tensor
    -
    -    def expand(self, *sizes):
    -        """Returns a new view of the tensor with singleton dimensions expanded
    -        to a larger size.
    -
    -        Tensor can be also expanded to a larger number of dimensions, and the
    -        new ones will be appended at the front.
    -
    -        Expanding a tensor does not allocate new memory, but only creates a
    -        new view on the existing tensor where a dimension of size one is
    -        expanded to a larger size by setting the ``stride`` to 0. Any dimension
    -        of size 1 can be expanded to an arbitrary value without allocating new
    -        memory.
    -
    -        Args:
    -            *sizes (torch.Size or int...): The desired expanded size
    -
    -        Example:
    -            >>> x = torch.Tensor([[1], [2], [3]])
    -            >>> x.size()
    -            torch.Size([3, 1])
    -            >>> x.expand(3, 4)
    -             1  1  1  1
    -             2  2  2  2
    -             3  3  3  3
    -            [torch.FloatTensor of size 3x4]
    -        """
    -        result = self.new()
    -        if len(sizes) == 1 and isinstance(sizes[0], torch.Size):
    -            sizes = sizes[0]
    -        else:
    -            sizes = torch.Size(sizes)
    -        src = self
    -
    -        num_unsqueezed = len(sizes) - src.dim()
    -        if src.dim() == 0:
    -            raise ValueError('can\'t expand an empty tensor')
    -        if num_unsqueezed < 0:
    -            raise ValueError('the number of dimensions provided must be greater or equal tensor.dim()')
    -
    -        src_stride = [0] * num_unsqueezed + list(src.stride())
    -        src_size = [1] * num_unsqueezed + list(src.size())
    -        for i in range(num_unsqueezed - 1, -1, -1):
    -            # to be consistent with .unsqueeze()
    -            src_stride[i] = src_size[i + 1] * src_stride[i + 1]
    -
    -        # create a new geometry for tensor:
    -        for i, (size, target_size) in enumerate(zip(src_size, sizes)):
    -            if size == 1:
    -                if target_size == 1:
    -                    continue
    -                src_size[i] = target_size
    -                src_stride[i] = 0
    -            elif size != target_size:
    -                raise ValueError('incorrect size: only supporting singleton expansion (size=1)')
    -
    -        result.set_(src.storage(), src.storage_offset(), torch.Size(src_size),
    -                    tuple(src_stride))
    -        return result
    -
    -    def expand_as(self, tensor):
    -        """Expands this tensor to the size of the specified tensor.
    -
    -        This is equivalent to::
    -
    -            self.expand(tensor.size())
    -        """
    -        return self.expand(tensor.size())
    -
    -    def repeat(self, *sizes):
    -        """Repeats this tensor along the specified dimensions.
    -
    -        Unlike :meth:`expand`, this function copies the tensor's data.
    -
    -        Args:
    -            *sizes (torch.Size or int...): The number of times to repeat this tensor along each dimension
    -
    -        Example:
    -            >>> x = torch.Tensor([1, 2, 3])
    -            >>> x.repeat(4, 2)
    -             1  2  3  1  2  3
    -             1  2  3  1  2  3
    -             1  2  3  1  2  3
    -             1  2  3  1  2  3
    -            [torch.FloatTensor of size 4x6]
    -            >>> x.repeat(4, 2, 1).size()
    -            torch.Size([4, 2, 3])
    -        """
    -        # If args == (torch.Size,), then we need to unpack the tuple
    -        if len(sizes) == 1 and isinstance(sizes[0], torch.Size):
    -            sizes = sizes[0]
    -        repeats = list(sizes)
    -        result = self.new()
    -        src = self.contiguous()
    -
    -        if len(repeats) < src.dim():
    -            raise ValueError('Number of dimensions of repeat dims can not be '
    -                             'smaller than number of dimensions of tensor')
    -
    -        xtensor = src.new().set_(src)
    -        xsize = list(xtensor.size())
    -        for i in _range(len(repeats) - src.dim()):
    -            xsize = [1] + xsize
    -
    -        size = torch.Size([a * b for a, b in zip(xsize, repeats)])
    -        xtensor.resize_(torch.Size(xsize))
    -        result.resize_(size)
    -        urtensor = result.new(result)
    -        for i in _range(xtensor.dim()):
    -            urtensor = urtensor.unfold(i, xtensor.size(i), xtensor.size(i))
    -        for i in _range(urtensor.dim() - xtensor.dim()):
    -            xsize = [1] + xsize
    -        xtensor.resize_(torch.Size(xsize))
    -        xxtensor = xtensor.expand_as(urtensor)
    -        urtensor.copy_(xxtensor)
    -        return result
    -
    -    # TODO: add tests for operators
    -    def __add__(self, other):
    -        return self.add(other)
    -    __radd__ = __add__
    -
    -    def __iadd__(self, other):
    -        return self.add_(other)
    -
    -    def __sub__(self, other):
    -        return self.sub(other)
    -
    -    def __rsub__(self, other):
    -        return self.new().resize_as_(self).fill_(other).add_(-1, self)
    -
    -    def __isub__(self, other):
    -        return self.sub_(other)
    -
    -    def __mul__(self, other):
    -        return self.mul(other)
    -    __rmul__ = __mul__
    -
    -    def __imul__(self, other):
    -        return self.mul_(other)
    -
    -    def __matmul__(self, other):
    -        dim_self = self.dim()
    -        try:
    -            dim_other = other.dim()
    -        except AttributeError:  # not a tensor
    -            return NotImplemented
    -        if dim_self == 1 and dim_other == 1:
    -            return self.dot(other)
    -        if dim_self == 2 and dim_other == 1:
    -            return self.mv(other)
    -        if dim_self == 1 and dim_other == 2:
    -            return self.unsqueeze(0).mm(other).squeeze(0)
    -        elif dim_self == 2 and dim_other == 2:
    -            return self.mm(other)
    -        raise ValueError("both arguments to __matmul__ need to be 1D or 2D, "
    -                         "but they are {}D and {}D".format(dim_self, dim_other))
    -
    -    def __pow__(self, other):
    -        return self.pow(other)
    -
    -    def __ipow__(self, other):
    -        return self.pow_(other)
    -
    -    def __div__(self, other):
    -        return self.div(other)
    -    __truediv__ = __div__
    -
    -    def __rdiv__(self, other):
    -        return self.new().resize_as_(self).fill_(other).div_(self)
    -    __rtruediv__ = __rdiv__
    -
    -    def __idiv__(self, other):
    -        return self.div_(other)
    -
    -    def __mod__(self, other):
    -        return self.remainder(other)
    -
    -    def __neg__(self):
    -        return self.neg()
    -
    -    def __eq__(self, other):
    -        return self.eq(other)
    -
    -    def __ne__(self, other):
    -        return self.ne(other)
    -
    -    def __lt__(self, other):
    -        return self.lt(other)
    -
    -    def __le__(self, other):
    -        return self.le(other)
    -
    -    def __gt__(self, other):
    -        return self.gt(other)
    -
    -    def __ge__(self, other):
    -        return self.ge(other)
    -
    -    # TODO: add native add or and xor in the libs
    -    def __and__(self, other):
    -        if (type(self).__name__ != 'ByteTensor' or
    -                type(other).__name__ != 'ByteTensor'):
    -            raise RuntimeError('logical operations are supported on ByteTensors only')
    -        return (self + other).eq(2)
    -
    -    def __or__(self, other):
    -        if (type(self).__name__ != 'ByteTensor' or
    -                type(other).__name__ != 'ByteTensor'):
    -            raise RuntimeError('logical operations are supported on ByteTensors only')
    -        return (self + other).gt(0)
    -
    -    def __xor__(self, other):
    -        if (type(self).__name__ != 'ByteTensor' or
    -                type(other).__name__ != 'ByteTensor'):
    -            raise RuntimeError('logical operations are supported on ByteTensors only')
    -        return (self + other).eq(1)
    -
    -    def __iand__(self, other):
    -        if (type(self).__name__ != 'ByteTensor' or
    -                type(other).__name__ != 'ByteTensor'):
    -            raise RuntimeError('logical operations are supported on ByteTensors only')
    -        return self.mul_(other)
    -
    -    def __ior__(self, other):
    -        if (type(self).__name__ != 'ByteTensor' or
    -                type(other).__name__ != 'ByteTensor'):
    -            raise RuntimeError('logical operations are supported on ByteTensors only')
    -        return self.copy_((self + other).gt(0))
    -
    -    def __ixor__(self, other):
    -        if (type(self).__name__ != 'ByteTensor' or
    -                type(other).__name__ != 'ByteTensor'):
    -            raise RuntimeError('logical operations are supported on ByteTensors only')
    -        return self.copy_((self + other).eq(1))
    -
    -    def __hash__(self):
    -        return id(self)
    -
    -
    -_TensorBase.type = _type
    -_TensorBase.cuda = _cuda
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/utils/data/dataloader.html b/docs/_modules/torch/utils/data/dataloader.html deleted file mode 100644 index f009b1822048..000000000000 --- a/docs/_modules/torch/utils/data/dataloader.html +++ /dev/null @@ -1,817 +0,0 @@ - - - - - - - - - - - torch.utils.data.dataloader — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.utils.data.dataloader

    -import torch
    -import torch.multiprocessing as multiprocessing
    -from .sampler import SequentialSampler, RandomSampler
    -import collections
    -import math
    -import sys
    -import traceback
    -import threading
    -if sys.version_info[0] == 2:
    -    import Queue as queue
    -    string_classes = basestring
    -else:
    -    import queue
    -    string_classes = (str, bytes)
    -
    -
    -class ExceptionWrapper(object):
    -    "Wraps an exception plus traceback to communicate across threads"
    -
    -    def __init__(self, exc_info):
    -        self.exc_type = exc_info[0]
    -        self.exc_msg = "".join(traceback.format_exception(*exc_info))
    -
    -
    -def _worker_loop(dataset, index_queue, data_queue, collate_fn):
    -    torch.set_num_threads(1)
    -    while True:
    -        r = index_queue.get()
    -        if r is None:
    -            data_queue.put(None)
    -            break
    -        idx, batch_indices = r
    -        try:
    -            samples = collate_fn([dataset[i] for i in batch_indices])
    -        except Exception:
    -            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
    -        else:
    -            data_queue.put((idx, samples))
    -
    -
    -def _pin_memory_loop(in_queue, out_queue, done_event):
    -    while True:
    -        try:
    -            r = in_queue.get()
    -        except:
    -            if done_event.is_set():
    -                return
    -            raise
    -        if r is None:
    -            break
    -        if isinstance(r[1], ExceptionWrapper):
    -            out_queue.put(r)
    -            continue
    -        idx, batch = r
    -        try:
    -            batch = pin_memory_batch(batch)
    -        except Exception:
    -            out_queue.put((idx, ExceptionWrapper(sys.exc_info())))
    -        else:
    -            out_queue.put((idx, batch))
    -
    -
    -def default_collate(batch):
    -    "Puts each data field into a tensor with outer dimension batch size"
    -    if torch.is_tensor(batch[0]):
    -        return torch.stack(batch, 0)
    -    elif type(batch[0]).__module__ == 'numpy':  # this allows to not import numpy
    -        return torch.stack([torch.from_numpy(b) for b in batch], 0)
    -    elif isinstance(batch[0], int):
    -        return torch.LongTensor(batch)
    -    elif isinstance(batch[0], float):
    -        return torch.DoubleTensor(batch)
    -    elif isinstance(batch[0], string_classes):
    -        return batch
    -    elif isinstance(batch[0], collections.Iterable):
    -        # if each batch element is not a tensor, then it should be a tuple
    -        # of tensors; in that case we collate each element in the tuple
    -        transposed = zip(*batch)
    -        return [default_collate(samples) for samples in transposed]
    -
    -    raise TypeError(("batch must contain tensors, numbers, or lists; found {}"
    -                     .format(type(batch[0]))))
    -
    -
    -def pin_memory_batch(batch):
    -    if torch.is_tensor(batch):
    -        return batch.pin_memory()
    -    elif isinstance(batch, string_classes):
    -        return batch
    -    elif isinstance(batch, collections.Iterable):
    -        return [pin_memory_batch(sample) for sample in batch]
    -    else:
    -        return batch
    -
    -
    -class DataLoaderIter(object):
    -    "Iterates once over the DataLoader's dataset, as specified by the sampler"
    -
    -    def __init__(self, loader):
    -        self.dataset = loader.dataset
    -        self.batch_size = loader.batch_size
    -        self.collate_fn = loader.collate_fn
    -        self.sampler = loader.sampler
    -        self.num_workers = loader.num_workers
    -        self.pin_memory = loader.pin_memory
    -        self.done_event = threading.Event()
    -
    -        self.samples_remaining = len(self.sampler)
    -        self.sample_iter = iter(self.sampler)
    -
    -        if self.num_workers > 0:
    -            self.index_queue = multiprocessing.SimpleQueue()
    -            self.data_queue = multiprocessing.SimpleQueue()
    -            self.batches_outstanding = 0
    -            self.shutdown = False
    -            self.send_idx = 0
    -            self.rcvd_idx = 0
    -            self.reorder_dict = {}
    -
    -            self.workers = [
    -                multiprocessing.Process(
    -                    target=_worker_loop,
    -                    args=(self.dataset, self.index_queue, self.data_queue, self.collate_fn))
    -                for _ in range(self.num_workers)]
    -
    -            for w in self.workers:
    -                w.daemon = True  # ensure that the worker exits on process exit
    -                w.start()
    -
    -            if self.pin_memory:
    -                in_data = self.data_queue
    -                self.data_queue = queue.Queue()
    -                self.pin_thread = threading.Thread(
    -                    target=_pin_memory_loop,
    -                    args=(in_data, self.data_queue, self.done_event))
    -                self.pin_thread.daemon = True
    -                self.pin_thread.start()
    -
    -            # prime the prefetch loop
    -            for _ in range(2 * self.num_workers):
    -                self._put_indices()
    -
    -    def __len__(self):
    -        return int(math.ceil(len(self.sampler) / float(self.batch_size)))
    -
    -    def __next__(self):
    -        if self.num_workers == 0:
    -            # same-process loading
    -            if self.samples_remaining == 0:
    -                raise StopIteration
    -            indices = self._next_indices()
    -            batch = self.collate_fn([self.dataset[i] for i in indices])
    -            if self.pin_memory:
    -                batch = pin_memory_batch(batch)
    -            return batch
    -
    -        # check if the next sample has already been generated
    -        if self.rcvd_idx in self.reorder_dict:
    -            batch = self.reorder_dict.pop(self.rcvd_idx)
    -            return self._process_next_batch(batch)
    -
    -        if self.batches_outstanding == 0:
    -            self._shutdown_workers()
    -            raise StopIteration
    -
    -        while True:
    -            assert (not self.shutdown and self.batches_outstanding > 0)
    -            idx, batch = self.data_queue.get()
    -            self.batches_outstanding -= 1
    -            if idx != self.rcvd_idx:
    -                # store out-of-order samples
    -                self.reorder_dict[idx] = batch
    -                continue
    -            return self._process_next_batch(batch)
    -
    -    next = __next__  # Python 2 compatibility
    -
    -    def __iter__(self):
    -        return self
    -
    -    def _next_indices(self):
    -        batch_size = min(self.samples_remaining, self.batch_size)
    -        batch = [next(self.sample_iter) for _ in range(batch_size)]
    -        self.samples_remaining -= len(batch)
    -        return batch
    -
    -    def _put_indices(self):
    -        assert self.batches_outstanding < 2 * self.num_workers
    -        if self.samples_remaining > 0:
    -            self.index_queue.put((self.send_idx, self._next_indices()))
    -            self.batches_outstanding += 1
    -            self.send_idx += 1
    -
    -    def _process_next_batch(self, batch):
    -        self.rcvd_idx += 1
    -        self._put_indices()
    -        if isinstance(batch, ExceptionWrapper):
    -            raise batch.exc_type(batch.exc_msg)
    -        return batch
    -
    -    def __getstate__(self):
    -        # TODO: add limited pickling support for sharing an iterator
    -        # across multiple threads for HOGWILD.
    -        # Probably the best way to do this is by moving the sample pushing
    -        # to a separate thread and then just sharing the data queue
    -        # but signalling the end is tricky without a non-blocking API
    -        raise NotImplementedError("DataLoaderIterator cannot be pickled")
    -
    -    def _shutdown_workers(self):
    -        if not self.shutdown:
    -            self.shutdown = True
    -            self.done_event.set()
    -            for _ in self.workers:
    -                self.index_queue.put(None)
    -
    -    def __del__(self):
    -        if self.num_workers > 0:
    -            self._shutdown_workers()
    -
    -
    -
    [docs]class DataLoader(object): - """ - Data loader. Combines a dataset and a sampler, and provides - single- or multi-process iterators over the dataset. - - Arguments: - dataset (Dataset): dataset from which to load the data. - batch_size (int, optional): how many samples per batch to load - (default: 1). - shuffle (bool, optional): set to ``True`` to have the data reshuffled - at every epoch (default: False). - sampler (Sampler, optional): defines the strategy to draw samples from - the dataset. If specified, the ``shuffle`` argument is ignored. - num_workers (int, optional): how many subprocesses to use for data - loading. 0 means that the data will be loaded in the main process - (default: 0) - collate_fn (callable, optional) - pin_memory (bool, optional) - """ - - def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, - num_workers=0, collate_fn=default_collate, pin_memory=False): - self.dataset = dataset - self.batch_size = batch_size - self.num_workers = num_workers - self.collate_fn = collate_fn - self.pin_memory = pin_memory - - if sampler is not None: - self.sampler = sampler - elif shuffle: - self.sampler = RandomSampler(dataset) - elif not shuffle: - self.sampler = SequentialSampler(dataset) - - def __iter__(self): - return DataLoaderIter(self) - - def __len__(self): - return int(math.ceil(len(self.sampler) / float(self.batch_size)))
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/utils/data/dataset.html b/docs/_modules/torch/utils/data/dataset.html deleted file mode 100644 index 5893f1a23253..000000000000 --- a/docs/_modules/torch/utils/data/dataset.html +++ /dev/null @@ -1,594 +0,0 @@ - - - - - - - - - - - torch.utils.data.dataset — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.utils.data.dataset

    -
    -
    [docs]class Dataset(object): - """An abstract class representing a Dataset. - - All other datasets should subclass it. All subclasses should override - ``__len__``, that provides the size of the dataset, and ``__getitem__``, - supporting integer indexing in range from 0 to len(self) exclusive. - """ - - def __getitem__(self, index): - raise NotImplementedError - - def __len__(self): - raise NotImplementedError
    - - -
    [docs]class TensorDataset(Dataset): - """Dataset wrapping data and target tensors. - - Each sample will be retrieved by indexing both tensors along the first - dimension. - - Arguments: - data_tensor (Tensor): contains sample data. - target_tensor (Tensor): contains sample targets (labels). - """ - - def __init__(self, data_tensor, target_tensor): - assert data_tensor.size(0) == target_tensor.size(0) - self.data_tensor = data_tensor - self.target_tensor = target_tensor - - def __getitem__(self, index): - return self.data_tensor[index], self.target_tensor[index] - - def __len__(self): - return self.data_tensor.size(0)
    -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/utils/ffi.html b/docs/_modules/torch/utils/ffi.html deleted file mode 100644 index 04314f478232..000000000000 --- a/docs/_modules/torch/utils/ffi.html +++ /dev/null @@ -1,743 +0,0 @@ - - - - - - - - - - - torch.utils.ffi — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.utils.ffi

    -import os
    -import glob
    -import tempfile
    -import shutil
    -from functools import wraps, reduce
    -from string import Template
    -import torch
    -import torch.cuda
    -from torch._utils import _accumulate
    -
    -try:
    -    import cffi
    -except ImportError:
    -    raise ImportError("torch.utils.ffi requires the cffi package")
    -
    -
    -if cffi.__version_info__ < (1, 4, 0):
    -    raise ImportError("torch.utils.ffi requires cffi version >= 1.4, but "
    -                      "got " + '.'.join(map(str, cffi.__version_info__)))
    -
    -
    -def _generate_typedefs():
    -    typedefs = []
    -    for t in ['Double', 'Float', 'Long', 'Int', 'Short', 'Char', 'Byte']:
    -        for lib in ['TH', 'THCuda']:
    -            for kind in ['Tensor', 'Storage']:
    -                python_name = t + kind
    -                if t == 'Float' and lib == 'THCuda':
    -                    th_name = 'THCuda' + kind
    -                else:
    -                    th_name = lib + t + kind
    -                th_struct = 'struct ' + th_name
    -
    -                typedefs += ['typedef {} {};'.format(th_struct, th_name)]
    -                module = torch if lib == 'TH' else torch.cuda
    -                python_class = getattr(module, python_name)
    -                _cffi_to_torch[th_struct] = python_class
    -                _torch_to_cffi[python_class] = th_struct
    -    return '\n'.join(typedefs) + '\n'
    -_cffi_to_torch = {}
    -_torch_to_cffi = {}
    -_typedefs = _generate_typedefs()
    -
    -
    -PY_MODULE_TEMPLATE = Template("""
    -from torch.utils.ffi import _wrap_function
    -from .$cffi_wrapper_name import lib as _lib, ffi as _ffi
    -
    -__all__ = []
    -def _import_symbols(locals):
    -    for symbol in dir(_lib):
    -        fn = getattr(_lib, symbol)
    -        locals[symbol] = _wrap_function(fn, _ffi)
    -        __all__.append(symbol)
    -
    -_import_symbols(locals())
    -""")
    -
    -
    -def _setup_wrapper(with_cuda):
    -    here = os.path.abspath(os.path.dirname(__file__))
    -    lib_dir = os.path.join(here, '..', '..', 'lib')
    -    include_dirs = [
    -        os.path.join(lib_dir, 'include'),
    -        os.path.join(lib_dir, 'include', 'TH'),
    -    ]
    -
    -    wrapper_source = '#include <TH/TH.h>\n'
    -    if with_cuda:
    -        import torch.cuda
    -        wrapper_source += '#include <THC/THC.h>\n'
    -        cuda_include_dirs = glob.glob('/usr/local/cuda/include')
    -        cuda_include_dirs += glob.glob('/Developer/NVIDIA/CUDA-*/include')
    -        include_dirs.append(os.path.join(lib_dir, 'include', 'THC'))
    -        include_dirs.extend(cuda_include_dirs)
    -    return wrapper_source, include_dirs
    -
    -
    -def _create_module_dir(base_path, fullname):
    -    module, _, name = fullname.rpartition('.')
    -    if not module:
    -        target_dir = name
    -    else:
    -        target_dir = reduce(os.path.join, fullname.split('.'))
    -    target_dir = os.path.join(base_path, target_dir)
    -    try:
    -        os.makedirs(target_dir)
    -    except os.error:
    -        pass
    -    for dirname in _accumulate(fullname.split('.'), os.path.join):
    -        init_file = os.path.join(base_path, dirname, '__init__.py')
    -        open(init_file, 'a').close()  # Create file if it doesn't exist yet
    -    return name, target_dir
    -
    -
    -def _build_extension(ffi, cffi_wrapper_name, target_dir, verbose):
    -    try:
    -        tmpdir = tempfile.mkdtemp()
    -        libname = cffi_wrapper_name + '.so'
    -        ffi.compile(tmpdir=tmpdir, verbose=verbose, target=libname)
    -        shutil.copy(os.path.join(tmpdir, libname),
    -                    os.path.join(target_dir, libname))
    -    finally:
    -        shutil.rmtree(tmpdir)
    -
    -
    -def _make_python_wrapper(name, cffi_wrapper_name, target_dir):
    -    py_source = PY_MODULE_TEMPLATE.substitute(name=name,
    -                                              cffi_wrapper_name=cffi_wrapper_name)
    -    with open(os.path.join(target_dir, '__init__.py'), 'w') as f:
    -        f.write(py_source)
    -
    -
    -
    [docs]def create_extension(name, headers, sources, verbose=True, with_cuda=False, - package=False, relative_to='.', **kwargs): - """Creates and configures a cffi.FFI object, that builds PyTorch extension. - - Arguments: - name (str): package name. Can be a nested module e.g. ``.ext.my_lib``. - headers (str or List[str]): list of headers, that contain only exported - functions - sources (List[str]): list of sources to compile. - verbose (bool, optional): if set to ``False``, no output will be printed - (default: True). - with_cuda (bool, optional): set to ``True`` to compile with CUDA headers - (default: False) - package (bool, optional): set to ``True`` to build in package mode (for modules - meant to be installed as pip packages) (default: False). - relative_to (str, optional): path of the build file. Required when - ``package is True``. It's best to use ``__file__`` for this argument. - kwargs: additional arguments that are passed to ffi to declare the - extension. See `Extension API reference`_ for details. - - .. 
_`Extension API reference`: https://docs.python.org/3/distutils/apiref.html#distutils.core.Extension - """ - base_path = os.path.abspath(os.path.dirname(relative_to)) - name_suffix, target_dir = _create_module_dir(base_path, name) - if not package: - cffi_wrapper_name = '_' + name_suffix - else: - cffi_wrapper_name = (name.rpartition('.')[0] + - '.{0}._{0}'.format(name_suffix)) - - wrapper_source, include_dirs = _setup_wrapper(with_cuda) - include_dirs.extend(kwargs.pop('include_dirs', [])) - - if isinstance(headers, str): - headers = [headers] - all_headers_source = '' - for header in headers: - with open(os.path.join(base_path, header), 'r') as f: - all_headers_source += f.read() + '\n\n' - - ffi = cffi.FFI() - sources = [os.path.join(base_path, src) for src in sources] - ffi.set_source(cffi_wrapper_name, wrapper_source + all_headers_source, - sources=sources, - include_dirs=include_dirs, **kwargs) - ffi.cdef(_typedefs + all_headers_source) - - _make_python_wrapper(name_suffix, '_' + name_suffix, target_dir) - - def build(): - _build_extension(ffi, cffi_wrapper_name, target_dir, verbose) - ffi.build = build - return ffi
    - - -def _wrap_function(function, ffi): - @wraps(function) - def safe_call(*args, **kwargs): - args = tuple(ffi.cast(_torch_to_cffi.get(type(arg), 'void') + '*', arg._cdata) - if torch.is_tensor(arg) or torch.is_storage(arg) - else arg - for arg in args) - args = (function,) + args - result = torch._C._safe_call(*args, **kwargs) - if isinstance(result, ffi.CData): - typeof = ffi.typeof(result) - if typeof.kind == 'pointer': - cdata = int(ffi.cast('uintptr_t', result)) - cname = typeof.item.cname - if cname in _cffi_to_torch: - return _cffi_to_torch[cname](cdata=cdata) - return result - return safe_call -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_modules/torch/utils/model_zoo.html b/docs/_modules/torch/utils/model_zoo.html deleted file mode 100644 index 65edd0ad4032..000000000000 --- a/docs/_modules/torch/utils/model_zoo.html +++ /dev/null @@ -1,666 +0,0 @@ - - - - - - - - - - - torch.utils.model_zoo — PyTorch 0.1.10 documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    - - - - -
    - - - - - - -
    -
    - - - - - - - - - - - - - - - - -
    - - - - -
    -
    -
    -
    - -

    Source code for torch.utils.model_zoo

    -import torch
    -
    -import hashlib
    -import os
    -import re
    -import shutil
    -import sys
    -import tempfile
    -if sys.version_info[0] == 2:
    -    from urlparse import urlparse
    -    from urllib2 import urlopen
    -else:
    -    from urllib.request import urlopen
    -    from urllib.parse import urlparse
    -try:
    -    from tqdm import tqdm
    -except ImportError:
    -    tqdm = None  # defined below
    -
    -# matches bfd8deac from resnet18-bfd8deac.pth
    -HASH_REGEX = re.compile(r'-([a-f0-9]*)\.')
    -
    -
    -
    [docs]def load_url(url, model_dir=None): - r"""Loads the Torch serialized object at the given URL. - - If the object is already present in `model_dir`, it's deserialied and - returned. The filename part of the URL should follow the naming convention - ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more - digits of the SHA256 hash of the contents of the file. The hash is used to - ensure unique names and to verify the contents of the file. - - The default value of `model_dir` is ``$TORCH_HOME/models`` where - ``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be - overriden with the ``$TORCH_MODEL_ZOO`` environement variable. - - Args: - url (string): URL of the object to download - model_dir (string, optional): directory in which to save the object - - Example: - >>> state_dict = torch.utils.model_zoo.load_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth') - - """ - if model_dir is None: - torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch')) - model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models')) - if not os.path.exists(model_dir): - os.makedirs(model_dir) - parts = urlparse(url) - filename = os.path.basename(parts.path) - cached_file = os.path.join(model_dir, filename) - if not os.path.exists(cached_file): - sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file)) - hash_prefix = HASH_REGEX.search(filename).group(1) - _download_url_to_file(url, cached_file, hash_prefix) - return torch.load(cached_file)
    - - -def _download_url_to_file(url, dst, hash_prefix): - u = urlopen(url) - meta = u.info() - if hasattr(meta, 'getheaders'): - file_size = int(meta.getheaders("Content-Length")[0]) - else: - file_size = int(meta.get_all("Content-Length")[0]) - - f = tempfile.NamedTemporaryFile(delete=False) - try: - sha256 = hashlib.sha256() - with tqdm(total=file_size) as pbar: - while True: - buffer = u.read(8192) - if len(buffer) == 0: - break - f.write(buffer) - sha256.update(buffer) - pbar.update(len(buffer)) - - f.close() - digest = sha256.hexdigest() - if digest[:len(hash_prefix)] != hash_prefix: - raise RuntimeError('invalid hash value (expected "{}", got "{}")' - .format(hash_prefix, digest)) - shutil.move(f.name, dst) - finally: - f.close() - if os.path.exists(f.name): - os.remove(f.name) - - -if tqdm is None: - # fake tqdm if it's not installed - class tqdm(object): - - def __init__(self, total): - self.total = total - self.n = 0 - - def update(self, n): - self.n += n - sys.stderr.write("\r{0:.1f}%".format(100 * self.n / float(self.total))) - sys.stderr.flush() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - sys.stderr.write('\n') -
    - -
    -
    - -
    -
    -
    - - -
    - -
    -

    - © Copyright 2017, Torch Contributors. - -

    -
    - Built with Sphinx using a theme provided by Read the Docs. - -
    - -
    -
    - -
    - -
    - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_sources/autograd.rst.txt b/docs/_sources/autograd.rst.txt deleted file mode 100644 index e5b102b376c4..000000000000 --- a/docs/_sources/autograd.rst.txt +++ /dev/null @@ -1,53 +0,0 @@ -.. role:: hidden - :class: hidden-section - -Automatic differentiation package - torch.autograd -================================================== - -.. automodule:: torch.autograd -.. currentmodule:: torch.autograd - -.. autofunction:: backward - -Variable --------- - -API compatibility -^^^^^^^^^^^^^^^^^ - -Variable API is nearly the same as regular Tensor API (with the exception -of a couple in-place methods, that would overwrite inputs required for -gradient computation). In most cases Tensors can be safely replaced with -Variables and the code will remain to work just fine. Because of this, -we're not documenting all the operations on variables, and you should -refere to :class:`torch.Tensor` docs for this purpose. - -In-place operations on Variables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Supporting in-place operations in autograd is a hard matter, and we discourage -their use in most cases. Autograd's aggressive buffer freeing and reuse makes -it very efficient and there are very few occasions when in-place operations -actually lower memory usage by any significant amount. Unless you're operating -under heavy memory pressure, you might never need to use them. - -In-place correctness checks -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -All :class:`Variable` s keep track of in-place operations applied to them, and -if the implementation detects that a variable was saved for backward in one of -the functions, but it was modified in-place afterwards, an error will be raised -once backward pass is started. This ensures that if you're using in-place -functions and not seing any errors, you can be sure that the computed gradients -are correct. - - -.. 
autoclass:: Variable - :members: - -:hidden:`Function` ------------------- - -.. autoclass:: Function - :members: - diff --git a/docs/_sources/cuda.rst.txt b/docs/_sources/cuda.rst.txt deleted file mode 100644 index 9f94d76779ad..000000000000 --- a/docs/_sources/cuda.rst.txt +++ /dev/null @@ -1,27 +0,0 @@ -torch.cuda -=================================== - -.. currentmodule:: torch.cuda - -.. automodule:: torch.cuda - :members: - -Communication collectives -------------------------- - -.. autofunction:: torch.cuda.comm.broadcast - -.. autofunction:: torch.cuda.comm.reduce_add - -.. autofunction:: torch.cuda.comm.scatter - -.. autofunction:: torch.cuda.comm.gather - -Streams and events ------------------- - -.. autoclass:: Stream - :members: - -.. autoclass:: Event - :members: diff --git a/docs/_sources/data.rst.txt b/docs/_sources/data.rst.txt deleted file mode 100644 index e5e1675b50e2..000000000000 --- a/docs/_sources/data.rst.txt +++ /dev/null @@ -1,7 +0,0 @@ -torch.utils.data -=================================== - -.. automodule:: torch.utils.data -.. autoclass:: Dataset -.. autoclass:: TensorDataset -.. autoclass:: DataLoader diff --git a/docs/_sources/ffi.rst.txt b/docs/_sources/ffi.rst.txt deleted file mode 100644 index ae7c0e9ddacd..000000000000 --- a/docs/_sources/ffi.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -torch.utils.ffi -=============== - -.. currentmodule:: torch.utils.ffi -.. autofunction:: create_extension - diff --git a/docs/_sources/index.rst.txt b/docs/_sources/index.rst.txt deleted file mode 100644 index 4adedb7618cf..000000000000 --- a/docs/_sources/index.rst.txt +++ /dev/null @@ -1,54 +0,0 @@ -.. PyTorch documentation master file, created by - sphinx-quickstart on Fri Dec 23 13:31:47 2016. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. 
- -:github_url: https://github.com/pytorch/pytorch - -PyTorch documentation -=================================== - -PyTorch is an optimized tensor library for deep learning using GPUs and CPUs. - -.. toctree:: - :glob: - :maxdepth: 1 - :caption: Notes - - notes/* - - -.. toctree:: - :maxdepth: 1 - :caption: Package Reference - - torch - tensors - storage - nn - optim - torch.autograd - torch.multiprocessing - torch.legacy - cuda - ffi - data - model_zoo - -.. toctree:: - :glob: - :maxdepth: 1 - :caption: torchvision Reference - - torchvision/torchvision - torchvision/datasets - torchvision/models - torchvision/transforms - torchvision/utils - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` diff --git a/docs/_sources/legacy.rst.txt b/docs/_sources/legacy.rst.txt deleted file mode 100644 index bc1aad54fb2b..000000000000 --- a/docs/_sources/legacy.rst.txt +++ /dev/null @@ -1,4 +0,0 @@ -Legacy package - torch.legacy -=================================== - -.. automodule:: torch.legacy diff --git a/docs/_sources/model_zoo.rst.txt b/docs/_sources/model_zoo.rst.txt deleted file mode 100644 index 3997a369d991..000000000000 --- a/docs/_sources/model_zoo.rst.txt +++ /dev/null @@ -1,5 +0,0 @@ -torch.utils.model_zoo -=================================== - -.. automodule:: torch.utils.model_zoo -.. autofunction:: load_url diff --git a/docs/_sources/multiprocessing.rst.txt b/docs/_sources/multiprocessing.rst.txt deleted file mode 100644 index 45035a0b470e..000000000000 --- a/docs/_sources/multiprocessing.rst.txt +++ /dev/null @@ -1,88 +0,0 @@ -Multiprocessing package - torch.multiprocessing -=============================================== - -.. automodule:: torch.multiprocessing -.. currentmodule:: torch.multiprocessing - -.. warning:: - - If the main process exits abruptly (e.g. because of an incoming signal), - Python's ``multiprocessing`` sometimes fails to clean up its children. 
- It's a known caveat, so if you're seeing any resource leaks after - interrupting the interpreter, it probably means that this has just happened - to you. - -Strategy management -------------------- - -.. autofunction:: get_all_sharing_strategies -.. autofunction:: get_sharing_strategy -.. autofunction:: set_sharing_strategy - -Sharing CUDA tensors --------------------- - -Sharing CUDA tensors between processes is supported only in Python 3, using -a ``spawn`` or ``forkserver`` start methods. :mod:`python:multiprocessing` in -Python 2 can only create subprocesses using ``fork``, and it's not supported -by the CUDA runtime. - -.. warning:: - - CUDA API requires that the allocation exported to other processes remains - valid as long as it's used by them. You should be careful and ensure that - CUDA tensors you shared don't go out of scope as long as it's necessary. - This shouldn't be a problem for sharing model parameters, but passing other - kinds of data should be done with care. Note that this restriction doesn't - apply to shared CPU memory. - - -Sharing strategies ------------------- - -This section provides a brief overview into how different sharing strategies -work. Note that it applies only to CPU tensor - CUDA tensors will always use -the CUDA API, as that's the only way they can be shared. - -File descriptor - ``file_descriptor`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - -.. note:: - - This is the default strategy (except for macOS and OS X where it's not - supported). - -This strategy will use file descriptors as shared memory handles. Whenever a -storage is moved to shared memory, a file descriptor obtained from ``shm_open`` -is cached with the object, and when it's going to be sent to other processes, -the file descriptor will be transferred (e.g. via UNIX sockets) to it. The -receiver will also cache the file descriptor and ``mmap`` it, to obtain a shared -view onto the storage data. 
- -Note that if there will be a lot of tensors shared, this strategy will keep a -large number of file descriptors open most of the time. If your system has low -limits for the number of open file descriptors, and you can't rise them, you -should use the ``file_system`` strategy. - -File system - ``file_system`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -This strategy will use file names given to ``shm_open`` to identify the shared -memory regions. This has a benefit of not requiring the implementation to cache -the file descriptors obtained from it, but at the same time is prone to shared -memory leaks. The file can't be deleted right after its creation, because other -processes need to access it to open their views. If the processes fatally -crash, or are killed, and don't call the storage destructors, the files will -remain in the system. This is very serious, because they keep using up the -memory until the system is restarted, or they're freed manually. - -To counter the problem of shared memory file leaks, :mod:`torch.multiprocessing` -will spawn a daemon named ``torch_shm_manager`` that will isolate itself from -the current process group, and will keep track of all shared memory allocations. -Once all processes connected to it exit, it will wait a moment to ensure there -will be no new connections, and will iterate over all shared memory files -allocated by the group. If it finds that any of them still exist, they will be -deallocated. We've tested this method and it prooved to be robust to various -failures. Still, if your system has high enough limits, and ``file_descriptor`` -is a supported strategy, we do not recommend switching to this one. diff --git a/docs/_sources/nn.rst.txt b/docs/_sources/nn.rst.txt deleted file mode 100644 index 4e8a8450137a..000000000000 --- a/docs/_sources/nn.rst.txt +++ /dev/null @@ -1,785 +0,0 @@ -.. role:: hidden - :class: hidden-section - -torch.nn -=================================== - -.. automodule:: torch.nn -.. 
currentmodule:: torch.nn - -Parameters ----------- - -.. autoclass:: Parameter - :members: - -Containers ----------------------------------- - -:hidden:`Module` -~~~~~~~~~~~~~~~~ - -.. autoclass:: Module - :members: - -:hidden:`Sequential` -~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: Sequential - :members: - -:hidden:`ModuleList` -~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: ModuleList - :members: - -:hidden:`ParameterList` -~~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: ParameterList - :members: - -Convolution Layers ----------------------------------- - -:hidden:`Conv1d` -~~~~~~~~~~~~~~~~ - -.. autoclass:: Conv1d - :members: - -:hidden:`Conv2d` -~~~~~~~~~~~~~~~~ - -.. autoclass:: Conv2d - :members: - -:hidden:`Conv3d` -~~~~~~~~~~~~~~~~ - -.. autoclass:: Conv3d - :members: - -:hidden:`ConvTranspose1d` -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: ConvTranspose1d - :members: - -:hidden:`ConvTranspose2d` -~~~~~~~~~~~~~~~~~~~~~~~~~ - - -.. autoclass:: ConvTranspose2d - :members: - -:hidden:`ConvTranspose3d` -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: ConvTranspose3d - :members: - - -Pooling Layers ----------------------------------- - -:hidden:`MaxPool1d` -~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: MaxPool1d - :members: - -:hidden:`MaxPool2d` -~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: MaxPool2d - :members: - -:hidden:`MaxPool3d` -~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: MaxPool3d - :members: - -:hidden:`MaxUnpool1d` -~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: MaxUnpool1d - :members: - -:hidden:`MaxUnpool2d` -~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: MaxUnpool2d - :members: - -:hidden:`MaxUnpool3d` -~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: MaxUnpool3d - :members: - -:hidden:`AvgPool1d` -~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: AvgPool1d - :members: - -:hidden:`AvgPool2d` -~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: AvgPool2d - :members: - -:hidden:`AvgPool3d` -~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: AvgPool3d - :members: - -:hidden:`FractionalMaxPool2d` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
autoclass:: FractionalMaxPool2d - :members: - -:hidden:`LPPool2d` -~~~~~~~~~~~~~~~~~~ - -.. autoclass:: LPPool2d - :members: - -Non-linear Activations ----------------------------------- - -:hidden:`ReLU` -~~~~~~~~~~~~~~ - -.. autoclass:: ReLU - :members: - -:hidden:`ReLU6` -~~~~~~~~~~~~~~~ - -.. autoclass:: ReLU6 - :members: - -:hidden:`ELU` -~~~~~~~~~~~~~ - -.. autoclass:: ELU - :members: - -:hidden:`PReLU` -~~~~~~~~~~~~~~~ - -.. autoclass:: PReLU - :members: - -:hidden:`LeakyReLU` -~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: LeakyReLU - :members: - -:hidden:`Threshold` -~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: Threshold - :members: - -:hidden:`Hardtanh` -~~~~~~~~~~~~~~~~~~ - -.. autoclass:: Hardtanh - :members: - -:hidden:`Sigmoid` -~~~~~~~~~~~~~~~~~ - -.. autoclass:: Sigmoid - :members: - -:hidden:`Tanh` -~~~~~~~~~~~~~~ - -.. autoclass:: Tanh - :members: - -:hidden:`LogSigmoid` -~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: LogSigmoid - :members: - -:hidden:`Softplus` -~~~~~~~~~~~~~~~~~~ - -.. autoclass:: Softplus - :members: - -:hidden:`Softshrink` -~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: Softshrink - :members: - -:hidden:`Softsign` -~~~~~~~~~~~~~~~~~~ - -.. autoclass:: Softsign - :members: - -:hidden:`Tanhshrink` -~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: Tanhshrink - :members: - -:hidden:`Softmin` -~~~~~~~~~~~~~~~~~ - -.. autoclass:: Softmin - :members: - -:hidden:`Softmax` -~~~~~~~~~~~~~~~~~ - -.. autoclass:: Softmax - :members: - -:hidden:`LogSoftmax` -~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: LogSoftmax - :members: - - -Normalization layers ----------------------------------- - -:hidden:`BatchNorm1d` -~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: BatchNorm1d - :members: - -:hidden:`BatchNorm2d` -~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: BatchNorm2d - :members: - -:hidden:`BatchNorm3d` -~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: BatchNorm3d - :members: - - -Recurrent layers ----------------------------------- - -:hidden:`RNN` -~~~~~~~~~~~~~ - -.. 
autoclass:: RNN - :members: - -:hidden:`LSTM` -~~~~~~~~~~~~~~ - -.. autoclass:: LSTM - :members: - -:hidden:`GRU` -~~~~~~~~~~~~~ - -.. autoclass:: GRU - :members: - -:hidden:`RNNCell` -~~~~~~~~~~~~~~~~~ - -.. autoclass:: RNNCell - :members: - -:hidden:`LSTMCell` -~~~~~~~~~~~~~~~~~~ - -.. autoclass:: LSTMCell - :members: - -:hidden:`GRUCell` -~~~~~~~~~~~~~~~~~ - -.. autoclass:: GRUCell - :members: - -Linear layers ----------------------------------- - -:hidden:`Linear` -~~~~~~~~~~~~~~~~ - -.. autoclass:: Linear - :members: - - -Dropout layers ----------------------------------- - -:hidden:`Dropout` -~~~~~~~~~~~~~~~~~ - -.. autoclass:: Dropout - :members: - -:hidden:`Dropout2d` -~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: Dropout2d - :members: - -:hidden:`Dropout3d` -~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: Dropout3d - :members: - - -Sparse layers ----------------------------------- - -:hidden:`Embedding` -~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: Embedding - :members: - - -Loss functions ----------------------------------- - -:hidden:`L1Loss` -~~~~~~~~~~~~~~~~ - -.. autoclass:: L1Loss - :members: - -:hidden:`MSELoss` -~~~~~~~~~~~~~~~~~ - -.. autoclass:: MSELoss - :members: - -:hidden:`CrossEntropyLoss` -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: CrossEntropyLoss - :members: - -:hidden:`NLLLoss` -~~~~~~~~~~~~~~~~~ - -.. autoclass:: NLLLoss - :members: - -:hidden:`NLLLoss2d` -~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: NLLLoss2d - :members: - -:hidden:`KLDivLoss` -~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: KLDivLoss - :members: - -:hidden:`BCELoss` -~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: BCELoss - :members: - -:hidden:`MarginRankingLoss` -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: MarginRankingLoss - :members: - -:hidden:`HingeEmbeddingLoss` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: HingeEmbeddingLoss - :members: - -:hidden:`MultiLabelMarginLoss` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
autoclass:: MultiLabelMarginLoss - :members: - -:hidden:`SmoothL1Loss` -~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: SmoothL1Loss - :members: - -:hidden:`SoftMarginLoss` -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: SoftMarginLoss - :members: - -:hidden:`MultiLabelSoftMarginLoss` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: MultiLabelSoftMarginLoss - :members: - -:hidden:`CosineEmbeddingLoss` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: CosineEmbeddingLoss - :members: - -:hidden:`MultiMarginLoss` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: MultiMarginLoss - :members: - - -Vision layers ----------------- - -:hidden:`PixelShuffle` -~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: PixelShuffle - :members: - -:hidden:`UpsamplingNearest2d` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: UpsamplingNearest2d - :members: - -:hidden:`UpsamplingBilinear2d` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: UpsamplingBilinear2d - :members: - - -Multi-GPU layers ----------------- - -:hidden:`DataParallel` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: DataParallel - :members: - - -Utilities ---------- - -:hidden:`clip_grad_norm` -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: torch.nn.utils.clip_grad_norm - - -.. currentmodule:: torch.nn.utils.rnn - -:hidden:`PackedSequence` -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: torch.nn.utils.rnn.PackedSequence - - -:hidden:`pack_padded_sequence` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: torch.nn.utils.rnn.pack_padded_sequence - - -:hidden:`pad_packed_sequence` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: torch.nn.utils.rnn.pad_packed_sequence - - -torch.nn.functional -=================== - -.. currentmodule:: torch.nn.functional - -Convolution functions ----------------------------------- - -:hidden:`conv1d` -~~~~~~~~~~~~~~~~ - -.. autofunction:: conv1d - -:hidden:`conv2d` -~~~~~~~~~~~~~~~~ - -.. autofunction:: conv2d - -:hidden:`conv3d` -~~~~~~~~~~~~~~~~ - -.. 
autofunction:: conv3d - -:hidden:`conv_transpose1d` -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: conv_transpose1d - -:hidden:`conv_transpose2d` -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: conv_transpose2d - -:hidden:`conv_transpose3d` -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: conv_transpose3d - -Pooling functions ----------------------------------- - -:hidden:`avg_pool1d` -~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: avg_pool1d - -:hidden:`avg_pool2d` -~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: avg_pool2d - -:hidden:`avg_pool3d` -~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: avg_pool3d - -:hidden:`max_pool1d` -~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: max_pool1d - -:hidden:`max_pool2d` -~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: max_pool2d - -:hidden:`max_pool3d` -~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: max_pool3d - -:hidden:`max_unpool1d` -~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: max_unpool1d - -:hidden:`max_unpool2d` -~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: max_unpool2d - -:hidden:`max_unpool3d` -~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: max_unpool3d - -:hidden:`lp_pool2d` -~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: lp_pool2d - -Non-linear activation functions -------------------------------- - -:hidden:`threshold` -~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: threshold - - -:hidden:`relu` -~~~~~~~~~~~~~~ - -.. autofunction:: relu - -:hidden:`hardtanh` -~~~~~~~~~~~~~~~~~~ - -.. autofunction:: hardtanh - -:hidden:`relu6` -~~~~~~~~~~~~~~~ - -.. autofunction:: relu6 - -:hidden:`elu` -~~~~~~~~~~~~~ - -.. autofunction:: elu - -:hidden:`leaky_relu` -~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: leaky_relu - -:hidden:`prelu` -~~~~~~~~~~~~~~~ - -.. autofunction:: prelu - -:hidden:`rrelu` -~~~~~~~~~~~~~~~ - -.. autofunction:: rrelu - -:hidden:`logsigmoid` -~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: logsigmoid - -:hidden:`hardshrink` -~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: hardshrink - -:hidden:`tanhshrink` -~~~~~~~~~~~~~~~~~~~~ - -.. 
autofunction:: tanhshrink - -:hidden:`softsign` -~~~~~~~~~~~~~~~~~~ - -.. autofunction:: softsign - -:hidden:`softplus` -~~~~~~~~~~~~~~~~~~ - -.. autofunction:: softplus - -:hidden:`softmin` -~~~~~~~~~~~~~~~~~ - -.. autofunction:: softmin - -:hidden:`softmax` -~~~~~~~~~~~~~~~~~ - -.. autofunction:: softmax - -:hidden:`softshrink` -~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: softshrink - -:hidden:`log_softmax` -~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: log_softmax - -:hidden:`tanh` -~~~~~~~~~~~~~~ - -.. autofunction:: tanh - -:hidden:`sigmoid` -~~~~~~~~~~~~~~~~~ - -.. autofunction:: sigmoid - -Normalization functions ------------------------ - -:hidden:`batch_norm` -~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: batch_norm - -Linear functions ----------------- - -:hidden:`linear` -~~~~~~~~~~~~~~~~ - -.. autofunction:: linear - -Dropout functions ------------------ - -:hidden:`dropout` -~~~~~~~~~~~~~~~~~ - -.. autofunction:: dropout - -Loss functions --------------- - -:hidden:`nll_loss` -~~~~~~~~~~~~~~~~~~ - -.. autofunction:: nll_loss - - -:hidden:`kl_div` -~~~~~~~~~~~~~~~~ - -.. autofunction:: kl_div - -:hidden:`cross_entropy` -~~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: cross_entropy - -:hidden:`binary_cross_entropy` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: binary_cross_entropy - -:hidden:`smooth_l1_loss` -~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: smooth_l1_loss - -Vision functions ----------------- - -:hidden:`pixel_shuffle` -~~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: pixel_shuffle - -:hidden:`pad` -~~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: pad - -torch.nn.init -============= - -.. currentmodule:: torch.nn.init -.. autofunction:: uniform -.. autofunction:: normal -.. autofunction:: constant -.. autofunction:: xavier_uniform -.. autofunction:: xavier_normal -.. autofunction:: kaiming_uniform -.. autofunction:: kaiming_normal -.. autofunction:: orthogonal -.. 
autofunction:: sparse diff --git a/docs/_sources/notes/autograd.rst.txt b/docs/_sources/notes/autograd.rst.txt deleted file mode 100644 index d560a59a54c3..000000000000 --- a/docs/_sources/notes/autograd.rst.txt +++ /dev/null @@ -1,144 +0,0 @@ -Autograd mechanics -================== - -This note will present an overview of how autograd works and records the -operations. It's not strictly necessary to understand all this, but we recommend -getting familiar with it, as it will help you write more efficient, cleaner -programs, and can aid you in debugging. - -.. _excluding-subgraphs: - -Excluding subgraphs from backward -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Every Variable has two flags: :attr:`requires_grad` and :attr:`volatile`. -They both allow for fine grained exclusion of subgraphs from gradient -computation and can increase efficiency. - -.. _excluding-requires_grad: - -``requires_grad`` -~~~~~~~~~~~~~~~~~ - -If there's a single input to an operation that requires gradient, its output -will also require gradient. Conversely, only if all inputs don't require -gradient, the output also won't require it. Backward computation is never -performed in the subgraphs, where all Variables didn't require gradients. - -.. code:: - - >>> x = Variable(torch.randn(5, 5)) - >>> y = Variable(torch.randn(5, 5)) - >>> z = Variable(torch.randn(5, 5), requires_grad=True) - >>> a = x + y - >>> a.requires_grad - False - >>> b = a + z - >>> b.requires_grad - True - -This is especially useful when you want to freeze part of your model, or you -know in advance that you're not going to use gradients w.r.t. some parameters. -For example if you want to finetune a pretrained CNN, it's enough to switch the -:attr:`requires_grad` flags in the frozen base, and no intermediate buffers will -be saved, until the computation gets to the last layer, where the affine -transform will use weights that require gradient, and the output of the network -will also require them. - -.. 
code:: - - model = torchvision.models.resnet18(pretrained=True) - for param in model.parameters(): - param.requires_grad = False - # Replace the last fully-connected layer - # Parameters of newly constructed modules have requires_grad=True by default - model.fc = nn.Linear(512, 100) - - # Optimize only the classifier - optimizer = optim.SGD(model.fc.parameters(), lr=1e-2, momentum=0.9) - -``volatile`` -~~~~~~~~~~~~ - -Volatile is recommended for purely inference mode, when you're sure you won't -be even calling `.backward()`. It's more efficient than any other autograd -setting - it will use the absolute minimal amount of memory to evaluate the -model. ``volatile`` also determines that ``requires_grad is False``. - -Volatile differs from :ref:`excluding-requires_grad` in how the flag propagates. -If there's even a single volatile input to an operation, its output is also -going to be volatile. Volatility spreads accross the graph much easier than -non-requiring gradient - you only need a **single** volatile leaf to have a -volatile output, while you need **all** leaves to not require gradient to -have an output the doesn't require gradient. Using volatile flag you don't -need to change any settings of your model parameters to use it for -inference. It's enough to create a volatile input, and this will ensure that -no intermediate states are saved. - -.. code:: - - >>> regular_input = Variable(torch.randn(5, 5)) - >>> volatile_input = Variable(torch.randn(5, 5), volatile=True) - >>> model = torchvision.models.resnet18(pretrained=True) - >>> model(regular_input).requires_grad - True - >>> model(volatile_input).requires_grad - False - >>> model(volatile_input).volatile - True - >>> model(volatile_input).creator is None - True - -How autograd encodes the history -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Each Variable has a ``.creator`` attribute, that points to the function, of -which it is an output. 
This is an entry point to a directed acyclic graph (DAG) -consisting of :class:`Function` objects as nodes, and references between them -being the edges. Every time an operation is performed, a new :class:`Function` -representing it is instantiated, its :meth:`~torch.autograd.Function.forward` -method is called, and its output :class:`Variable` s creators are set to it. -Then, by following the path from any :class:`Variable` to the leaves, it is -possible to reconstruct the sequence of operations that has created the data, -and automatically compute the gradients. - -An important thing to note is that the graph is recreated from scratch at every -iteration, and this is exactly what allows for using arbitrary Python control -flow statements, that can change the overall shape and size of the graph at -every iteration. You don't have to encode all possible paths before you -launch the training - what you run is what you differentiate. - -In-place operations on Variables -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Supporting in-place operations in autograd is a hard matter, and we discourage -their use in most cases. Autograd's aggressive buffer freeing and reuse makes -it very efficient and there are very few occasions when in-place operations -actually lower memory usage by any significant amount. Unless you're operating -under heavy memory pressure, you might never need to use them. - -There are two main reasons that limit the applicability of in-place operations: - -1. Overwriting values required to compute gradients. This is why variables don't - support ``log_``. Its gradient formula requires the original input, and while - it is possible to recreate it by computing the inverse operation, it is - numerically unstable, and requires additional work that often defeats the - purpose of using these functions. - -2. Every in-place operation actually requires the implementation to rewrite the - computational graph. 
Out-of-place versions simply allocate new objects and - keep references to the old graph, while in-place operations, require - changing the creator of all inputs to the :class:`Function` representing - this operation. This can be tricky, especially if there are many Variables - that reference the same storage (e.g. created by indexing or transposing), - and in-place functions will actually raise an error if the storage of - modified inputs is referenced by any other :class:`Variable`. - -In-place correctness checks -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Every variable keeps a version counter, that is incremented every time it's -marked dirty in any operation. When a Function saves any tensors for backward, -a version counter of their containing Variable is saved as well. Once you access -``self.saved_tensors`` it is checked, and if it's greater than the saved value -an error is raised. diff --git a/docs/_sources/notes/cuda.rst.txt b/docs/_sources/notes/cuda.rst.txt deleted file mode 100644 index 4db82e61b5c3..000000000000 --- a/docs/_sources/notes/cuda.rst.txt +++ /dev/null @@ -1,83 +0,0 @@ -.. _cuda-semantics: - -CUDA semantics -============== - -:mod:`torch.cuda` keeps track of currently selected GPU, and all CUDA tensors -you allocate will be created on it. The selected device can be changed with a -:any:`torch.cuda.device` context manager. - -However, once a tensor is allocated, you can do operations on it irrespectively -of your selected device, and the results will be always placed in on the same -device as the tensor. - -Cross-GPU operations are not allowed by default, with the only exception of -:meth:`~torch.Tensor.copy_`. Unless you enable peer-to-peer memory accesses -any attempts to launch ops on tensors spread across different devices will -raise an error. 
- -Below you can find a small example showcasing this:: - - x = torch.cuda.FloatTensor(1) - # x.get_device() == 0 - y = torch.FloatTensor(1).cuda() - # y.get_device() == 0 - - with torch.cuda.device(1): - # allocates a tensor on GPU 1 - a = torch.cuda.FloatTensor(1) - - # transfers a tensor from CPU to GPU 1 - b = torch.FloatTensor(1).cuda() - # a.get_device() == b.get_device() == 1 - - c = a + b - # c.get_device() == 1 - - z = x + y - # z.get_device() == 0 - - # even within a context, you can give a GPU id to the .cuda call - d = torch.randn(2).cuda(2) - # d.get_device() == 2 - -Best practices --------------- - -Use pinned memory buffers -^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. warning: - - This is an advanced tip. You overuse of pinned memory can cause serious - problems if you'll be running low on RAM, and you should be aware that - pinning is often an expensive operation. - -Host to GPU copies are much faster when they originate from pinned (page-locked) -memory. CPU tensors and storages expose a :meth:`~torch.Tensor.pin_memory` -method, that returns a copy of the object, with data put in a pinned region. - -Also, once you pin a tensor or storage, you can use asynchronous GPU copies. -Just pass an additional ``async=True`` argument to a :meth:`~torch.Tensor.cuda` -call. This can be used to overlap data transfers with computation. - -You can make the :class:`~torch.utils.data.DataLoader` return batches placed in -pinned memory by passing ``pin_memory=True`` to its constructor. - -.. _cuda-nn-dataparallel-instead: - -Use nn.DataParallel instead of multiprocessing -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Most use cases involving batched input and multiple GPUs should default to using -:class:`~torch.nn.DataParallel` to utilize more than one GPU. Even with the GIL, -a single python process can saturate multiple GPUs. - -As of version 0.1.9, large numbers of GPUs (8+) might not be fully utilized. -However, this is a known issue that is under active development. 
As always, -test your use case. - -There are significant caveats to using CUDA models with -:mod:`~torch.multiprocessing`; unless care is taken to meet the data handling -requirements exactly, it is likely that your program will have incorrect or -undefined behavior. diff --git a/docs/_sources/notes/extending.rst.txt b/docs/_sources/notes/extending.rst.txt deleted file mode 100644 index 7ed7aad89c8d..000000000000 --- a/docs/_sources/notes/extending.rst.txt +++ /dev/null @@ -1,169 +0,0 @@ -Extending PyTorch -================= - -In this note we'll cover ways of extending :mod:`torch.nn`, -:mod:`torch.autograd`, and writing custom C extensions utilizing our C -libraries. - -Extending :mod:`torch.autograd` -------------------------------- - -.. currentmodule:: torch.autograd - -Adding operations to :mod:`~torch.autograd` requires implementing a new -:class:`Function` subclass for each operation. Recall that :class:`Function` s -are what :mod:`~torch.autograd` uses to compute the results and gradients, and -encode the operation history. Every new function requires you to implement 3 -methods: - -- ``__init__`` (*optional*) - if your operation is parametrized by/uses - objects different than :class:`Variable` s, you should pass them as arguments - to ``__init__``. For example, ``AddConstant`` function takes a scalar to add, - while ``Transpose`` requires specifying which two dimensions to swap. If your - function doesn't require any additional parameters, you can skip it. -- :meth:`~Function.forward` - the code that performs the operation. It can take - as many arguments as you want, with some of them being - optional, if you specify the default values. Keep in mind that only - :class:`Variable` s will be passed in here. You can return either a single - :class:`Variable` output, or a :class:`tuple` of :class:`Variable` s if there - are multiple. 
Also, please refer to the docs of :class:`Function` to find - descriptions of useful methods that can be called only from - :meth:`~Function.forward`. -- :meth:`~Function.backward` - gradient formula. It will be given - as many arguments as there were outputs, with each of them representing - gradient w.r.t. that output. It should return as many :class:`Tensor` s as - there were inputs, with each of them containing the gradient w.r.t. - corresponding input. If you inputs didn't require gradient (see - :attr:`~Variable.needs_input_grad`), or it was non-differentiable, you - can return :class:`None`. Also, if you have optional arguments to - :meth:`~Variable.forward` you can return more gradients than there were - inputs, as long as they're all :any:`python:None`. - -Below you can find code for a ``Linear`` function from :mod:`torch.nn`, with -additional comments:: - - # Inherit from Function - class Linear(Function): - - # bias is an optional argument - def forward(self, input, weight, bias=None): - self.save_for_backward(input, weight, bias) - output = input.mm(weight.t()) - if bias is not None: - output += bias.unsqueeze(0).expand_as(output) - return output - - # This function has only a single output, so it gets only one gradient - def backward(self, grad_output): - # This is a pattern that is very convenient - at the top of backward - # unpack saved_tensors and initialize all gradients w.r.t. inputs to - # None. Thanks to the fact that additional trailing Nones are - # ignored, the return statement is simple even when the function has - # optional inputs. - input, weight, bias = self.saved_tensors - grad_input = grad_weight = grad_bias = None - - # These needs_input_grad checks are optional and there only to - # improve efficiency. If you want to make your code simpler, you can - # skip them. Returning gradients for inputs that don't require it is - # not an error. 
- if self.needs_input_grad[0]: - grad_input = grad_output.mm(weight) - if self.needs_input_grad[1]: - grad_weight = grad_output.t().mm(input) - if bias is not None and self.needs_input_grad[2]: - grad_bias = grad_output.sum(0).squeeze(0) - - return grad_input, grad_weight, grad_bias - -Now, to make it easier to use these custom ops, we recommend wrapping them in -small helper functions:: - - def linear(input, weight, bias=None): - # First braces create a Function object. Any arguments given here - # will be passed to __init__. Second braces will invoke the __call__ - # operator, that will then use forward() to compute the result and - # return it. - return Linear()(input, weight, bias) - -You probably want to check if the backward method you implemented actually -computes the derivatives of your function. It is possible by comparing with -numerical approximations using small finite differences:: - - from torch.autograd import gradcheck - - # gradchek takes a tuple of tensor as input, check if your gradient - # evaluated with these tensors are close enough to numerical - # approximations and returns True if they all verify this condition. - input = (Variable(torch.randn(20,20).double(), requires_grad=True),) - test = gradcheck.gradcheck(Linear(), input, eps=1e-6, atol=1e-4) - print(test) - -Extending :mod:`torch.nn` -------------------------- - -.. currentmodule:: torch.nn - -:mod:`~torch.nn` exports two kinds of interfaces - modules and their functional -versions. You can extend it in both ways, but we recommend using modules for -all kinds of layers, that hold any parameters or buffers, and recommend using -a functional form parameter-less operations like activation functions, pooling, -etc. - -Adding a functional version of an operation is already fully covered in the -section above. 
- -Adding a :class:`Module` -^^^^^^^^^^^^^^^^^^^^^^^^ - -Since :mod:`~torch.nn` heavily utilizes :mod:`~torch.autograd`, adding a new -:class:`Module` requires implementing a :class:`~torch.autograd.Function` -that performs the operation and can compute the gradient. From now on let's -assume that we want to implement a ``Linear`` module and we have the function -implementated as in the listing above. There's very little code required to -add this. Now, there are two functions that need to be implemented: - -- ``__init__`` (*optional*) - takes in arguments such as kernel sizes, numbers - of features, etc. and initializes parameters and buffers. -- :meth:`~Module.forward` - instantiates a :class:`~torch.autograd.Function` and - uses it to perform the operation. It's very similar to a functional wrapper - shown above. - -This is how a ``Linear`` module can be implemented:: - - class Linear(nn.Module): - def __init__(self, input_features, output_features, bias=True): - self.input_features = input_features - self.output_features = output_features - - # nn.Parameter is a special kind of Variable, that will get - # automatically registered as Module's parameter once it's assigned - # as an attribute. Parameters and buffers need to be registered, or - # they won't appear in .parameters() (doesn't apply to buffers), and - # won't be converted when e.g. .cuda() is called. You can use - # .register_buffer() to register buffers. - # nn.Parameters can never be volatile and, different than Variables, - # they require gradients by default. - self.weight = nn.Parameter(torch.Tensor(input_features, output_features)) - if bias: - self.bias = nn.Parameter(torch.Tensor(output_features)) - else: - # You should always register all possible parameters, but the - # optional ones can be None if you want. 
- self.register_parameter('bias', None) - - # Not a very smart way to initialize weights - self.weight.data.uniform_(-0.1, 0.1) - if bias is not None: - self.bias.data.uniform_(-0.1, 0.1) - - def forward(self, input): - # See the autograd section for explanation of what happens here. - return Linear()(input, self.weight, self.bias) - - -Writing custom C extensions ---------------------------- - -Coming soon. For now you can find an example at -`GitHub `_. diff --git a/docs/_sources/notes/multiprocessing.rst.txt b/docs/_sources/notes/multiprocessing.rst.txt deleted file mode 100644 index 85b7d6a5faf9..000000000000 --- a/docs/_sources/notes/multiprocessing.rst.txt +++ /dev/null @@ -1,124 +0,0 @@ -Multiprocessing best practices -============================== - -:mod:`torch.multiprocessing` is a drop in replacement for Python's -:mod:`python:multiprocessing` module. It supports the exact same operations, -but extends it, so that all tensors sent through a -:class:`python:multiprocessing.Queue`, will have their data moved into shared -memory and will only send a handle to another process. - -.. note:: - - When a :class:`~torch.autograd.Variable` is sent to another process, both - the :attr:`Variable.data` and :attr:`Variable.grad.data` are going to be - shared. - -This allows to implement various training methods, like Hogwild, A3C, or any -others that require asynchronous operation. - -Sharing CUDA tensors --------------------- - -Sharing CUDA tensors between processes is supported only in Python 3, using -a ``spawn`` or ``forkserver`` start methods. :mod:`python:multiprocessing` in -Python 2 can only create subprocesses using ``fork``, and it's not supported -by the CUDA runtime. - -.. warning:: - - CUDA API requires that the allocation exported to other processes remains - valid as long as it's used by them. You should be careful and ensure that - CUDA tensors you shared don't go out of scope as long as it's necessary. 
- This shouldn't be a problem for sharing model parameters, but passing other - kinds of data should be done with care. Note that this restriction doesn't - apply to shared CPU memory. - -See also: :ref:`cuda-nn-dataparallel-instead` - - -Best practices and tips ------------------------ - -Avoiding and fighting deadlocks -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are a lot of things that can go wrong when a new process is spawned, with -the most common cause of deadlocks being background threads. If there's any -thread that holds a lock or imports a module, and ``fork`` is called, it's very -likely that the subprocess will be in a corrupted state and will deadlock or -fail in a different way. Note that even if you don't, Python built in -libraries do - no need to look further than :mod:`python:multiprocessing`. -:class:`python:multiprocessing.Queue` is actually a very complex class, that -spawns multiple threads used to serialize, send and receive objects, and they -can cause aforementioned problems too. If you find yourself in such situation -try using a :class:`~python:multiprocessing.queues.SimpleQueue`, that doesn't -use any additional threads. - -We're trying our best to make it easy for you and ensure these deadlocks don't -happen but some things are out of our control. If you have any issues you can't -cope with for a while, try reaching out on forums, and we'll see if it's an -issue we can fix. - -Reuse buffers passed through a Queue -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Remember that each time you put a :class:`~torch.Tensor` into a -:class:`python:multiprocessing.Queue`, it has to be moved into shared memory. -If it's already shared, it is a no-op, otherwise it will incur an additional -memory copy that can slow down the whole process. Even if you have a pool of -processes sending data to a single one, make it send the buffers back - this -is nearly free and will let you avoid a copy when sending next batch. - -Asynchronous multiprocess training (e.g. 
Hogwild) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Using :mod:`torch.multiprocessing`, it is possible to train a model -asynchronously, with parameters either shared all the time, or being -periodically synchronized. In the first case, we recommend sending over the whole -model object, while in the latter, we advise to only send the -:meth:`~torch.nn.Module.state_dict`. - -We recommend using :class:`python:multiprocessing.Queue` for passing all kinds -of PyTorch objects between processes. It is possible to e.g. inherit the tensors -and storages already in shared memory, when using the ``fork`` start method, -however it is very bug prone and should be used with care, and only by advanced -users. Queues, even though they're sometimes a less elegant solution, will work -properly in all cases. - -.. warning:: - - You should be careful about having global statements, that are not guarded - with an ``if __name__ == '__main__'``. If a different start method than - ``fork`` is used, they will be executed in all subprocesses. - -Hogwild -~~~~~~~ - -A concrete Hogwild implementation can be found in the `examples repository`__, -but to showcase the overall structure of the code, there's also a minimal -example below as well:: - - import torch.multiprocessing as mp - from model import MyModel - - def train(model): - # Construct data_loader, optimizer, etc. - for data, labels in data_loader: - optimizer.zero_grad() - loss_fn(model(data), labels).backward() - optimizer.step() # This will update the shared parameters - - if __name__ == '__main__': - num_processes = 4 - model = MyModel() - # NOTE: this is required for the ``fork`` method to work - model.share_memory() - processes = [] - for rank in range(num_processes): - p = mp.Process(target=train, args=(model,)) - p.start() - processes.append(p) - for p in processes: - p.join() - -.. 
__: https://github.com/pytorch/examples/tree/master/mnist_hogwild diff --git a/docs/_sources/notes/serialization.rst.txt b/docs/_sources/notes/serialization.rst.txt deleted file mode 100644 index 46800314cf83..000000000000 --- a/docs/_sources/notes/serialization.rst.txt +++ /dev/null @@ -1,34 +0,0 @@ - -Serialization semantics -======================= - -Best practices --------------- - -.. _recommend-saving-models: - -Recommended approach for saving a model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are two main approaches for serializing and restoring a model. - -The first (recommended) saves and loads only the model parameters:: - - torch.save(the_model.state_dict(), PATH) - -Then later:: - - the_model = TheModelClass(*args, **kwargs) - the_model.load_state_dict(torch.load(PATH)) - -The second saves and loads the entire model:: - - torch.save(the_model, PATH) - -Then later:: - - the_model = torch.load(PATH) - -However in this case, the serialized data is bound to the specific classes -and the exact directory structure used, so it can break in various ways when -used in other projects, or after some serious refactors. diff --git a/docs/_sources/optim.rst.txt b/docs/_sources/optim.rst.txt deleted file mode 100644 index 92e3f14d1fe5..000000000000 --- a/docs/_sources/optim.rst.txt +++ /dev/null @@ -1,116 +0,0 @@ -torch.optim -=================================== - -.. automodule:: torch.optim - -How to use an optimizer ------------------------ - -To use :mod:`torch.optim` you have to construct an optimizer object, that will hold -the current state and will update the parameters based on the computed gradients. - -Constructing it -^^^^^^^^^^^^^^^ - -To construct an :class:`Optimizer` you have to give it an iterable containing the -parameters (all should be :class:`~torch.autograd.Variable` s) to optimize. Then, -you can specify optimizer-specific options such as the learning rate, weight decay, etc. 
- -Example:: - - optimizer = optim.SGD(model.parameters(), lr = 0.01, momentum=0.9) - optimizer = optim.Adam([var1, var2], lr = 0.0001) - -Per-parameter options -^^^^^^^^^^^^^^^^^^^^^ - -:class:`Optimizer` s also support specifying per-parameter options. To do this, instead -of passing an iterable of :class:`~torch.autograd.Variable` s, pass in an iterable of -:class:`dict` s. Each of them will define a separate parameter group, and should contain -a ``params`` key, containing a list of parameters belonging to it. Other keys -should match the keyword arguments accepted by the optimizers, and will be used -as optimization options for this group. - -.. note:: - - You can still pass options as keyword arguments. They will be used as - defaults, in the groups that didn't override them. This is useful when you - only want to vary a single option, while keeping all others consistent - between parameter groups. - - -For example, this is very useful when one wants to specify per-layer learning rates:: - - optim.SGD([ - {'params': model.base.parameters()}, - {'params': model.classifier.parameters(), 'lr': 1e-3} - ], lr=1e-2, momentum=0.9) - -This means that ``model.base``'s parameters will use the default learning rate of ``1e-2``, -``model.classifier``'s parameters will use a learning rate of ``1e-3``, and a momentum of -``0.9`` will be used for all parameters - -Taking an optimization step -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -All optimizers implement a :func:`~Optimizer.step` method, that updates the -parameters. It can be used in two ways: - -``optimizer.step()`` -~~~~~~~~~~~~~~~~~~~~ - -This is a simplified version supported by most optimizers. The function can be -called once the gradients are computed using e.g. -:func:`~torch.autograd.Variable.backward`. 
- -Example:: - - for input, target in dataset: - optimizer.zero_grad() - output = model(input) - loss = loss_fn(output, target) - loss.backward() - optimizer.step() - -``optimizer.step(closure)`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Some optimization algorithms such as Conjugate Gradient and LBFGS need to -reevaluate the function multiple times, so you have to pass in a closure that -allows them to recompute your model. The closure should clear the gradients, -compute the loss, and return it. - -Example:: - - for input, target in dataset: - def closure(): - optimizer.zero_grad() - output = model(input) - loss = loss_fn(output, target) - loss.backward() - return loss - optimizer.step(closure) - -Algorithms ----------- - -.. autoclass:: Optimizer - :members: -.. autoclass:: Adadelta - :members: -.. autoclass:: Adagrad - :members: -.. autoclass:: Adam - :members: -.. autoclass:: Adamax - :members: -.. autoclass:: ASGD - :members: -.. autoclass:: LBFGS - :members: -.. autoclass:: RMSprop - :members: -.. autoclass:: Rprop - :members: -.. autoclass:: SGD - :members: diff --git a/docs/_sources/storage.rst.txt b/docs/_sources/storage.rst.txt deleted file mode 100644 index 61148916884c..000000000000 --- a/docs/_sources/storage.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -torch.Storage -=================================== - -A :class:`torch.Storage` is a contiguous, one-dimensional array of a single -data type. - -Every :class:`torch.Tensor` has a corresponding storage of the same data type. - -.. autoclass:: torch.FloatStorage - :members: - :undoc-members: - :inherited-members: diff --git a/docs/_sources/tensors.rst.txt b/docs/_sources/tensors.rst.txt deleted file mode 100644 index 7e3b84d79eea..000000000000 --- a/docs/_sources/tensors.rst.txt +++ /dev/null @@ -1,308 +0,0 @@ -.. currentmodule:: torch - -torch.Tensor -=================================== - -A :class:`torch.Tensor` is a multi-dimensional matrix containing elements of -a single data type. 
- -Torch defines seven CPU tensor types and eight GPU tensor types: - -======================== =========================== ================================ -Data type CPU tensor GPU tensor -======================== =========================== ================================ -32-bit floating point :class:`torch.FloatTensor` :class:`torch.cuda.FloatTensor` -64-bit floating point :class:`torch.DoubleTensor` :class:`torch.cuda.DoubleTensor` -16-bit floating point N/A :class:`torch.cuda.HalfTensor` -8-bit integer (unsigned) :class:`torch.ByteTensor` :class:`torch.cuda.ByteTensor` -8-bit integer (signed) :class:`torch.CharTensor` :class:`torch.cuda.CharTensor` -16-bit integer (signed) :class:`torch.ShortTensor` :class:`torch.cuda.ShortTensor` -32-bit integer (signed) :class:`torch.IntTensor` :class:`torch.cuda.IntTensor` -64-bit integer (signed) :class:`torch.LongTensor` :class:`torch.cuda.LongTensor` -======================== =========================== ================================ - -The :class:`torch.Tensor` constructor is an alias for the default tensor type -(:class:`torch.FloatTensor`). - -A tensor can be constructed from a Python :class:`list` or sequence: - -:: - - >>> torch.FloatTensor([[1, 2, 3], [4, 5, 6]]) - 1 2 3 - 4 5 6 - [torch.FloatTensor of size 2x3] - -An empty tensor can be constructed by specifying its size: - -:: - - >>> torch.IntTensor(2, 4).zero_() - 0 0 0 0 - 0 0 0 0 - [torch.IntTensor of size 2x4] - -The contents of a tensor can be accessed and modified using Python's indexing -and slicing notation: - -:: - - >>> x = torch.FloatTensor([[1, 2, 3], [4, 5, 6]]) - >>> print(x[1][2]) - 6.0 - >>> x[0][1] = 8 - >>> print(x) - 1 8 3 - 4 5 6 - [torch.FloatTensor of size 2x3] - -Each tensor has an associated :class:`torch.Storage`, which holds its data. -The tensor class provides multi-dimensional, `strided `_ -view of a storage and defines numeric operations on it. - -.. note:: - Methods which mutate a tensor are marked with an underscore suffix. 
- For example, :func:`torch.FloatTensor.abs_` computes the absolute value - in-place and returns the modified tensor, while :func:`torch.FloatTensor.abs` - computes the result in a new tensor. - -.. class:: Tensor() - Tensor(*sizes) - Tensor(size) - Tensor(sequence) - Tensor(ndarray) - Tensor(tensor) - Tensor(storage) - - Creates a new tensor from an optional size or data. - - If no arguments are given, an empty zero-dimensional tensor is returned. - If a :class:`numpy.ndarray`, :class:`torch.Tensor`, or :class:`torch.Storage` - is given, a new tensor that shares the same data is returned. If a Python - sequence is given, a new tensor is created from a copy of the sequence. - - .. automethod:: abs - .. automethod:: abs_ - .. automethod:: acos - .. automethod:: acos_ - .. automethod:: add - .. automethod:: add_ - .. automethod:: addbmm - .. automethod:: addbmm_ - .. automethod:: addcdiv - .. automethod:: addcdiv_ - .. automethod:: addcmul - .. automethod:: addcmul_ - .. automethod:: addmm - .. automethod:: addmm_ - .. automethod:: addmv - .. automethod:: addmv_ - .. automethod:: addr - .. automethod:: addr_ - .. automethod:: apply_ - .. automethod:: asin - .. automethod:: asin_ - .. automethod:: atan - .. automethod:: atan2 - .. automethod:: atan2_ - .. automethod:: atan_ - .. automethod:: baddbmm - .. automethod:: baddbmm_ - .. automethod:: bernoulli - .. automethod:: bernoulli_ - .. automethod:: bmm - .. automethod:: byte - .. automethod:: cauchy_ - .. automethod:: ceil - .. automethod:: ceil_ - .. automethod:: char - .. automethod:: chunk - .. automethod:: clamp - .. automethod:: clamp_ - .. automethod:: clone - .. automethod:: contiguous - .. automethod:: copy_ - .. automethod:: cos - .. automethod:: cos_ - .. automethod:: cosh - .. automethod:: cosh_ - .. automethod:: cpu - .. automethod:: cross - .. automethod:: cuda - .. automethod:: cumprod - .. automethod:: cumsum - .. automethod:: data_ptr - .. automethod:: diag - .. automethod:: dim - .. 
automethod:: dist - .. automethod:: div - .. automethod:: div_ - .. automethod:: dot - .. automethod:: double - .. automethod:: eig - .. automethod:: element_size - .. automethod:: eq - .. automethod:: eq_ - .. automethod:: equal - .. automethod:: exp - .. automethod:: exp_ - .. automethod:: expand - .. automethod:: expand_as - .. automethod:: exponential_ - .. automethod:: fill_ - .. automethod:: float - .. automethod:: floor - .. automethod:: floor_ - .. automethod:: fmod - .. automethod:: fmod_ - .. automethod:: frac - .. automethod:: frac_ - .. automethod:: gather - .. automethod:: ge - .. automethod:: ge_ - .. automethod:: gels - .. automethod:: geometric_ - .. automethod:: geqrf - .. automethod:: ger - .. automethod:: gesv - .. automethod:: gt - .. automethod:: gt_ - .. automethod:: half - .. automethod:: histc - .. automethod:: index - .. automethod:: index_add_ - .. automethod:: index_copy_ - .. automethod:: index_fill_ - .. automethod:: index_select - .. automethod:: int - .. automethod:: inverse - .. automethod:: is_contiguous - .. autoattribute:: is_cuda - :annotation: - .. automethod:: is_pinned - .. automethod:: is_set_to - .. automethod:: is_signed - .. automethod:: kthvalue - .. automethod:: le - .. automethod:: le_ - .. automethod:: lerp - .. automethod:: lerp_ - .. automethod:: log - .. automethod:: log1p - .. automethod:: log1p_ - .. automethod:: log_ - .. automethod:: log_normal_ - .. automethod:: long - .. automethod:: lt - .. automethod:: lt_ - .. automethod:: map_ - .. automethod:: masked_copy_ - .. automethod:: masked_fill_ - .. automethod:: masked_select - .. automethod:: max - .. automethod:: mean - .. automethod:: median - .. automethod:: min - .. automethod:: mm - .. automethod:: mode - .. automethod:: mul - .. automethod:: mul_ - .. automethod:: multinomial - .. automethod:: mv - .. automethod:: narrow - .. automethod:: ndimension - .. automethod:: ne - .. automethod:: ne_ - .. automethod:: neg - .. automethod:: neg_ - .. 
automethod:: nelement - .. automethod:: new - .. automethod:: nonzero - .. automethod:: norm - .. automethod:: normal_ - .. automethod:: numel - .. automethod:: numpy - .. automethod:: orgqr - .. automethod:: ormqr - .. automethod:: permute - .. automethod:: pin_memory - .. automethod:: potrf - .. automethod:: potri - .. automethod:: potrs - .. automethod:: pow - .. automethod:: pow_ - .. automethod:: prod - .. automethod:: pstrf - .. automethod:: qr - .. automethod:: random_ - .. automethod:: reciprocal - .. automethod:: reciprocal_ - .. automethod:: remainder - .. automethod:: remainder_ - .. automethod:: renorm - .. automethod:: renorm_ - .. automethod:: repeat - .. automethod:: resize_ - .. automethod:: resize_as_ - .. automethod:: round - .. automethod:: round_ - .. automethod:: rsqrt - .. automethod:: rsqrt_ - .. automethod:: scatter_ - .. automethod:: select - .. automethod:: set_ - .. automethod:: share_memory_ - .. automethod:: short - .. automethod:: sigmoid - .. automethod:: sigmoid_ - .. automethod:: sign - .. automethod:: sign_ - .. automethod:: sin - .. automethod:: sin_ - .. automethod:: sinh - .. automethod:: sinh_ - .. automethod:: size - .. automethod:: sort - .. automethod:: split - .. automethod:: sqrt - .. automethod:: sqrt_ - .. automethod:: squeeze - .. automethod:: squeeze_ - .. automethod:: std - .. automethod:: storage - .. automethod:: storage_offset - .. automethod:: storage_type - .. automethod:: stride - .. automethod:: sub - .. automethod:: sub_ - .. automethod:: sum - .. automethod:: svd - .. automethod:: symeig - .. automethod:: t - .. automethod:: t_ - .. automethod:: tan - .. automethod:: tan_ - .. automethod:: tanh - .. automethod:: tanh_ - .. automethod:: tolist - .. automethod:: topk - .. automethod:: trace - .. automethod:: transpose - .. automethod:: transpose_ - .. automethod:: tril - .. automethod:: tril_ - .. automethod:: triu - .. automethod:: triu_ - .. automethod:: trtrs - .. automethod:: trunc - .. 
automethod:: trunc_ - .. automethod:: type - .. automethod:: type_as - .. automethod:: unfold - .. automethod:: uniform_ - .. automethod:: unsqueeze - .. automethod:: unsqueeze_ - .. automethod:: var - .. automethod:: view - .. automethod:: view_as - .. automethod:: zero_ diff --git a/docs/_sources/torch.rst.txt b/docs/_sources/torch.rst.txt deleted file mode 100644 index 977808100b83..000000000000 --- a/docs/_sources/torch.rst.txt +++ /dev/null @@ -1,182 +0,0 @@ -torch -=================================== -.. automodule:: torch - -Tensors ----------------------------------- -.. autofunction:: is_tensor -.. autofunction:: is_storage -.. autofunction:: set_default_tensor_type -.. autofunction:: numel -.. autofunction:: set_printoptions - - -Creation Ops -~~~~~~~~~~~~~~~~~~~~~~ -.. autofunction:: eye -.. autofunction:: from_numpy -.. autofunction:: linspace -.. autofunction:: logspace -.. autofunction:: ones -.. autofunction:: rand -.. autofunction:: randn -.. autofunction:: randperm -.. autofunction:: range -.. autofunction:: zeros - - -Indexing, Slicing, Joining, Mutating Ops -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autofunction:: cat -.. autofunction:: chunk -.. autofunction:: gather -.. autofunction:: index_select -.. autofunction:: masked_select -.. autofunction:: nonzero -.. autofunction:: split -.. autofunction:: squeeze -.. autofunction:: stack -.. autofunction:: t -.. autofunction:: transpose -.. autofunction:: unbind -.. autofunction:: unsqueeze - - -Random sampling ----------------------------------- -.. autofunction:: manual_seed -.. autofunction:: initial_seed -.. autofunction:: get_rng_state -.. autofunction:: set_rng_state -.. autodata:: default_generator -.. autofunction:: bernoulli -.. autofunction:: multinomial -.. autofunction:: normal - - -Serialization ----------------------------------- -.. autofunction:: save -.. autofunction:: load - - -Parallelism ----------------------------------- -.. autofunction:: get_num_threads -.. 
autofunction:: set_num_threads - - -Math operations ----------------------------------- - -Pointwise Ops -~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: abs -.. autofunction:: acos -.. autofunction:: add -.. autofunction:: addcdiv -.. autofunction:: addcmul -.. autofunction:: asin -.. autofunction:: atan -.. autofunction:: atan2 -.. autofunction:: ceil -.. autofunction:: clamp -.. autofunction:: cos -.. autofunction:: cosh -.. autofunction:: div -.. autofunction:: exp -.. autofunction:: floor -.. autofunction:: fmod -.. autofunction:: frac -.. autofunction:: lerp -.. autofunction:: log -.. autofunction:: log1p -.. autofunction:: mul -.. autofunction:: neg -.. autofunction:: pow -.. autofunction:: reciprocal -.. autofunction:: remainder -.. autofunction:: round -.. autofunction:: rsqrt -.. autofunction:: sigmoid -.. autofunction:: sign -.. autofunction:: sin -.. autofunction:: sinh -.. autofunction:: sqrt -.. autofunction:: tan -.. autofunction:: tanh -.. autofunction:: trunc - - -Reduction Ops -~~~~~~~~~~~~~~~~~~~~~~ -.. autofunction:: cumprod -.. autofunction:: cumsum -.. autofunction:: dist -.. autofunction:: mean -.. autofunction:: median -.. autofunction:: mode -.. autofunction:: norm -.. autofunction:: prod -.. autofunction:: std -.. autofunction:: sum -.. autofunction:: var - - -Comparison Ops -~~~~~~~~~~~~~~~~~~~~~~ -.. autofunction:: eq -.. autofunction:: equal -.. autofunction:: ge -.. autofunction:: gt -.. autofunction:: kthvalue -.. autofunction:: le -.. autofunction:: lt -.. autofunction:: max -.. autofunction:: min -.. autofunction:: ne -.. autofunction:: sort -.. autofunction:: topk - - -Other Operations -~~~~~~~~~~~~~~~~~~~~~~ -.. autofunction:: cross -.. autofunction:: diag -.. autofunction:: histc -.. autofunction:: renorm -.. autofunction:: trace -.. autofunction:: tril -.. autofunction:: triu - - -BLAS and LAPACK Operations -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. autofunction:: addbmm -.. autofunction:: addmm -.. autofunction:: addmv -.. 
autofunction:: addr -.. autofunction:: baddbmm -.. autofunction:: bmm -.. autofunction:: dot -.. autofunction:: eig -.. autofunction:: gels -.. autofunction:: geqrf -.. autofunction:: ger -.. autofunction:: gesv -.. autofunction:: inverse -.. autofunction:: mm -.. autofunction:: mv -.. autofunction:: orgqr -.. autofunction:: ormqr -.. autofunction:: potrf -.. autofunction:: potri -.. autofunction:: potrs -.. autofunction:: pstrf -.. autofunction:: qr -.. autofunction:: svd -.. autofunction:: symeig -.. autofunction:: trtrs - diff --git a/docs/_sources/torchvision/datasets.rst.txt b/docs/_sources/torchvision/datasets.rst.txt deleted file mode 100644 index 666203f1c81d..000000000000 --- a/docs/_sources/torchvision/datasets.rst.txt +++ /dev/null @@ -1,162 +0,0 @@ -torchvision.datasets -==================== - -The following dataset loaders are available: - -- `MNIST`_ -- `COCO (Captioning and Detection)`_ -- `LSUN Classification`_ -- `ImageFolder`_ -- `Imagenet-12`_ -- `CIFAR10 and CIFAR100`_ -- `STL10`_ - -Datasets have the API: - -- ``__getitem__`` -- ``__len__`` - They all subclass from ``torch.utils.data.Dataset`` - Hence, they can all be multi-threaded (python multiprocessing) using - standard torch.utils.data.DataLoader. - -For example: - -``torch.utils.data.DataLoader(coco_cap, batch_size=args.batchSize, shuffle=True, num_workers=args.nThreads)`` - -In the constructor, each dataset has a slightly different API as needed, -but they all take the keyword args: - -- ``transform`` - a function that takes in an image and returns a - transformed version -- common stuff like ``ToTensor``, ``RandomCrop``, etc. These can be - composed together with ``transforms.Compose`` (see transforms section - below) -- ``target_transform`` - a function that takes in the target and - transforms it. For example, take in the caption string and return a - tensor of word indices. 
- -MNIST -~~~~~ - -``dset.MNIST(root, train=True, transform=None, target_transform=None, download=False)`` - -- ``root`` : root directory of dataset where ``processed/training.pt`` and ``processed/test.pt`` exist. -- ``train`` : ``True`` = Training set, ``False`` = Test set -- ``download`` : ``True`` = downloads the dataset from the internet and puts it in root directory. If dataset already downloaded, place the processed dataset (function available in mnist.py) in the ``processed`` folder. - -COCO -~~~~ - -This requires the `COCO API to be installed`_ - -Captions: -^^^^^^^^^ - -``dset.CocoCaptions(root="dir where images are", annFile="json annotation file", [transform, target_transform])`` - -Example: - -.. code:: python - - import torchvision.datasets as dset - import torchvision.transforms as transforms - cap = dset.CocoCaptions(root = 'dir where images are', - annFile = 'json annotation file', - transform=transforms.ToTensor()) - - print('Number of samples: ', len(cap)) - img, target = cap[3] # load 4th sample - - print("Image Size: ", img.size()) - print(target) - -Output: - -:: - - Number of samples: 82783 - Image Size: (3L, 427L, 640L) - [u'A plane emitting smoke stream flying over a mountain.', - u'A plane darts across a bright blue sky behind a mountain covered in snow', - u'A plane leaves a contrail above the snowy mountain top.', - u'A mountain that has a plane flying overheard in the distance.', - u'A mountain view with a plume of smoke in the background'] - -Detection: -^^^^^^^^^^ - -``dset.CocoDetection(root="dir where images are", annFile="json annotation file", [transform, target_transform])`` - -LSUN -~~~~ - -``dset.LSUN(db_path, classes='train', [transform, target_transform])`` - -- db\_path = root directory for the database files -- ``classes`` = ``‘train’`` (all categories, training set), ``‘val’`` (all categories, validation set), ``‘test’`` (all categories, test set) -- [``‘bedroom\_train’``, ``‘church\_train’``, …] : a list of categories to 
load - -ImageFolder -~~~~~~~~~~~ - -A generic data loader where the images are arranged in this way: - -:: - - root/dog/xxx.png - root/dog/xxy.png - root/dog/xxz.png - - root/cat/123.png - root/cat/nsdf3.png - root/cat/asd932_.png - -``dset.ImageFolder(root="root folder path", [transform, target_transform])`` - -It has the members: - -- ``self.classes`` - The class names as a list -- ``self.class_to_idx`` - Corresponding class indices -- ``self.imgs`` - The list of (image path, class-index) tuples - -Imagenet-12 -~~~~~~~~~~~ - -This is simply implemented with an ImageFolder dataset. - -The data is preprocessed `as described -here `__ - -`Here is an -example `__. - -CIFAR -~~~~~ - -``dset.CIFAR10(root, train=True, transform=None, target_transform=None, download=False)`` - -``dset.CIFAR100(root, train=True, transform=None, target_transform=None, download=False)`` - -- ``root`` : root directory of dataset where there is folder - ``cifar-10-batches-py`` -- ``train`` : ``True`` = Training set, ``False`` = Test set -- ``download`` : ``True`` = downloads the dataset from the internet and - puts it in root directory. If dataset already downloaded, doesn't do anything. - -STL10 -~~~~~ - -``dset.STL10(root, split='train', transform=None, target_transform=None, download=False)`` - -- ``root`` : root directory of dataset where there is folder ``stl10_binary`` -- ``split`` : ``'train'`` = Training set, ``'test'`` = Test set, ``'unlabeled'`` = Unlabeled set, ``'train+unlabeled'`` = Training + Unlabeled set (missing label marked as ``-1``) -- ``download`` : ``True`` = downloads the dataset from the internet and puts it in root directory. If dataset already downloaded, doesn't do anything. - -.. _MNIST: #mnist -.. _COCO (Captioning and Detection): #coco -.. _LSUN Classification: #lsun -.. _ImageFolder: #imagefolder -.. _Imagenet-12: #imagenet-12 -.. _CIFAR10 and CIFAR100: #cifar -.. _STL10: #stl10 -.. 
_COCO API to be installed: https://github.com/pdollar/coco/tree/master/PythonAPI \ No newline at end of file diff --git a/docs/_sources/torchvision/models.rst.txt b/docs/_sources/torchvision/models.rst.txt deleted file mode 100644 index 5bde1742f133..000000000000 --- a/docs/_sources/torchvision/models.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -torchvision.models -=================== - -.. currentmodule:: torchvision.models - - -.. automodule:: torchvision.models - :members: alexnet, resnet18, resnet34, resnet50, resnet101, resnet152, - vgg11, vgg11_bn, vgg13, vgg13_bn, vgg16, vgg16_bn, vgg19, - vgg19_bn - :undoc-members: diff --git a/docs/_sources/torchvision/torchvision.rst.txt b/docs/_sources/torchvision/torchvision.rst.txt deleted file mode 100644 index bbffba767ca0..000000000000 --- a/docs/_sources/torchvision/torchvision.rst.txt +++ /dev/null @@ -1,5 +0,0 @@ -torchvision -=================== - -The :mod:`torchvision` package consists of popular datasets, model -architectures, and common image transformations for computer vision. diff --git a/docs/_sources/torchvision/transforms.rst.txt b/docs/_sources/torchvision/transforms.rst.txt deleted file mode 100644 index 9c97f0db2d60..000000000000 --- a/docs/_sources/torchvision/transforms.rst.txt +++ /dev/null @@ -1,40 +0,0 @@ -torchvision.transforms -====================== - -.. currentmodule:: torchvision.transforms - -.. autoclass:: Compose - -Transforms on PIL.Image ------------------------ - -.. autoclass:: Scale - -.. autoclass:: CenterCrop - -.. autoclass:: RandomCrop - -.. autoclass:: RandomHorizontalFlip - -.. autoclass:: RandomSizedCrop - -.. autoclass:: Pad - -Transforms on torch.\*Tensor ----------------------------- - -.. autoclass:: Normalize - - -Conversion Transforms ---------------------- - -.. autoclass:: ToTensor - -.. autoclass:: ToPILImage - -Generic Transforms ------------------- - -.. 
autoclass:: Lambda - diff --git a/docs/_sources/torchvision/utils.rst.txt b/docs/_sources/torchvision/utils.rst.txt deleted file mode 100644 index 468ddf683739..000000000000 --- a/docs/_sources/torchvision/utils.rst.txt +++ /dev/null @@ -1,9 +0,0 @@ -torchvision.utils -=================== - -.. currentmodule:: torchvision.utils - -.. autofunction:: make_grid - -.. autofunction:: save_image - diff --git a/docs/_static/ajax-loader.gif b/docs/_static/ajax-loader.gif deleted file mode 100644 index 61faf8cab239..000000000000 Binary files a/docs/_static/ajax-loader.gif and /dev/null differ diff --git a/docs/_static/basic.css b/docs/_static/basic.css deleted file mode 100644 index 7ed0e58edb31..000000000000 --- a/docs/_static/basic.css +++ /dev/null @@ -1,632 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar 
form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox input[type="text"] { - width: 170px; -} - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - 
-table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - 
margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - 
margin-left: 30px; -} - -dt:target, .highlighted { - background-color: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - 
font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: relative; - left: 0px; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/docs/_static/comment-bright.png b/docs/_static/comment-bright.png deleted file mode 100644 index 15e27edb12ac..000000000000 Binary files a/docs/_static/comment-bright.png and /dev/null differ diff --git a/docs/_static/comment-close.png b/docs/_static/comment-close.png deleted file mode 100644 index 4d91bcf57de8..000000000000 Binary files a/docs/_static/comment-close.png and /dev/null differ diff --git a/docs/_static/comment.png b/docs/_static/comment.png deleted file mode 100644 index dfbc0cbd512b..000000000000 Binary files a/docs/_static/comment.png and /dev/null differ diff --git a/docs/_static/css/badge_only.css b/docs/_static/css/badge_only.css deleted file mode 100644 index f4b46e9046c4..000000000000 --- a/docs/_static/css/badge_only.css +++ /dev/null @@ -1,2 +0,0 @@ 
-.fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-weight:normal;font-style:normal;src:url("../fonts/fontawesome-webfont.eot");src:url("../fonts/fontawesome-webfont.eot?#iefix") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff") format("woff"),url("../fonts/fontawesome-webfont.ttf") format("truetype"),url("../fonts/fontawesome-webfont.svg#FontAwesome") format("svg")}.fa:before{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa{display:inline-block;text-decoration:inherit}li .fa{display:inline-block}li .fa-large:before,li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-0.8em}ul.fas li .fa{width:0.8em}ul.fas li .fa-large:before,ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before{content:""}.icon-book:before{content:""}.fa-caret-down:before{content:""}.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.icon-caret-up:before{content:""}.fa-caret-left:before{content:""}.icon-caret-left:before{content:""}.fa-caret-right:before{content:""}.icon-caret-right:before{content:""}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;border-top:solid 10px #343131;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version 
.fa-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} -/*# sourceMappingURL=badge_only.css.map */ diff --git a/docs/_static/css/badge_only.css.map b/docs/_static/css/badge_only.css.map deleted file mode 100644 index a302a9f0afc7..000000000000 --- a/docs/_static/css/badge_only.css.map +++ /dev/null @@ -1,7 +0,0 @@ -{ -"version": 3, -"mappings": 
"CAyDA,SAAY,EACV,qBAAsB,EAAE,UAAW,EAqDrC,QAAS,EARP,IAAK,EAAE,AAAC,EACR,+BAAS,EAEP,MAAO,EAAE,IAAK,EACd,MAAO,EAAE,CAAE,EACb,cAAO,EACL,IAAK,EAAE,GAAI,EC1Gb,SAkBC,EAjBC,UAAW,ECFJ,UAAW,EDGlB,UAAW,EAHqC,KAAM,EAItD,SAAU,EAJsD,KAAM,EAapE,EAAG,EAAE,sCAAwB,EAC7B,EAAG,EAAE,8PAG2D,ECftE,SAAU,EACR,MAAO,EAAE,WAAY,EACrB,UAAW,EAAE,UAAW,EACxB,SAAU,EAAE,KAAM,EAClB,UAAW,EAAE,KAAM,EACnB,UAAW,EAAE,AAAC,EACd,cAAe,EAAE,MAAO,EAG1B,IAAK,EACH,MAAO,EAAE,WAAY,EACrB,cAAe,EAAE,MAAO,EAIxB,KAAG,EACD,MAAO,EAAE,WAAY,EACvB,sCAAiB,EAGf,IAAK,EAAE,MAAY,EAEvB,KAAM,EACJ,cAAe,EAAE,GAAI,EACrB,UAAW,EAAE,EAAG,EAChB,UAAW,EAAE,KAAM,EAEjB,YAAG,EACD,IAAK,EAAE,IAAI,EACb,oDAAiB,EAGf,aAAc,EAAE,OAAQ,EAG9B,cAAe,EACb,MAAO,EAAE,EAAO,EAElB,gBAAiB,EACf,MAAO,EAAE,EAAO,EAElB,oBAAqB,EACnB,MAAO,EAAE,EAAO,EAElB,sBAAuB,EACrB,MAAO,EAAE,EAAO,EAElB,kBAAmB,EACjB,MAAO,EAAE,EAAO,EAElB,oBAAqB,EACnB,MAAO,EAAE,EAAO,EAElB,oBAAqB,EACnB,MAAO,EAAE,EAAO,EAElB,sBAAuB,EACrB,MAAO,EAAE,EAAO,EAElB,qBAAsB,EACpB,MAAO,EAAE,EAAO,EAElB,uBAAwB,EACtB,MAAO,EAAE,EAAO,ECnElB,YAAa,EACX,OAAQ,EAAE,IAAK,EACf,KAAM,EAAE,AAAC,EACT,GAAI,EAAE,AAAC,EACP,IAAK,EC6E+B,IAAK,ED5EzC,IAAK,EEuC+B,MAAyB,EFtC7D,SAAU,EAAE,MAAkC,EAC9C,SAAU,EAAE,iBAAiC,EAC7C,UAAW,EEkDyB,sDAA2D,EFjD/F,MAAO,EC+E6B,EAAG,ED9EvC,cAAC,EACC,IAAK,EEkC6B,MAAK,EFjCvC,cAAe,EAAE,GAAI,EACvB,6BAAgB,EACd,MAAO,EAAE,GAAI,EACf,iCAAoB,EAClB,MAAO,EAAE,GAAqB,EAC9B,eAAgB,EAAE,MAAkC,EACpD,MAAO,EAAE,IAAK,EACd,SAAU,EAAE,IAAK,EACjB,QAAS,EAAE,EAAG,EACd,KAAM,EAAE,MAAO,EACf,IAAK,EEX6B,MAAM,EL4F1C,IAAK,EAAE,AAAC,EACR,iFAAS,EAEP,MAAO,EAAE,IAAK,EACd,MAAO,EAAE,CAAE,EACb,uCAAO,EACL,IAAK,EAAE,GAAI,EGrFX,qCAAG,EACD,IAAK,EEmB2B,MAAyB,EFlB3D,0CAAQ,EACN,IAAK,EAAE,GAAI,EACb,4CAAU,EACR,IAAK,EAAE,GAAI,EACb,iDAAiB,EACf,eAAgB,ECQgB,MAAI,EDPpC,IAAK,EEO2B,GAAM,EFNxC,wDAAwB,EACtB,eAAgB,EEsBgB,MAAO,EFrBvC,IAAK,ECzB2B,GAAI,ED0BxC,yCAA8B,EAC5B,MAAO,EAAE,IAAK,EAChB,gCAAmB,EACjB,QAAS,EAAE,EAAG,EACd,MAAO,EAAE,GAAqB,EAC9B,IAAK,EEJ6B,GAAY,EFK9C,MAAO,EAAE,GAAI,EACb,mCAAE,EACA,MAAO,EAAE,IAAK,EACd,KAAM,EAAE,EAAG,EACX,KAAM,EAAE,AAAC,EACT,KAAM,EAAE,KAAM,
EACd,MAAO,EAAE,AAAC,EACV,SAAU,EAAE,gBAA6C,EAC3D,mCAAE,EACA,MAAO,EAAE,WAAY,EACrB,KAAM,EAAE,AAAC,EACT,qCAAC,EACC,MAAO,EAAE,WAAY,EACrB,MAAO,EAAE,EAAqB,EAC9B,IAAK,EEZyB,MAAyB,EFa7D,sBAAW,EACT,IAAK,EAAE,GAAI,EACX,KAAM,EAAE,GAAI,EACZ,IAAK,EAAE,GAAI,EACX,GAAI,EAAE,GAAI,EACV,KAAM,EAAE,GAAI,EACZ,QAAS,ECkByB,IAAK,EDjBvC,iCAAU,EACR,IAAK,EAAE,GAAI,EACb,+BAAQ,EACN,IAAK,EAAE,GAAI,EACb,oDAA+B,EAC7B,SAAU,EAAE,IAAK,EACjB,6DAAQ,EACN,IAAK,EAAE,GAAI,EACb,+DAAU,EACR,IAAK,EAAE,GAAI,EACf,2CAAoB,EAClB,IAAK,EAAE,GAAI,EACX,KAAM,EAAE,GAAI,EACZ,UAAW,EAAE,GAAI,EACjB,MAAO,EAAE,IAAuB,EAChC,MAAO,EAAE,IAAK,EACd,SAAU,EAAE,KAAM,EGhDpB,mCAAsB,EHmDxB,YAAa,EACX,IAAK,EAAE,EAAG,EACV,MAAO,EAAE,GAAI,EACb,kBAAO,EACL,MAAO,EAAE,IAAK", -"sources": ["../../../bower_components/wyrm/sass/wyrm_core/_mixin.sass","../../../bower_components/bourbon/dist/css3/_font-face.scss","../../../sass/_theme_badge_fa.sass","../../../sass/_theme_badge.sass","../../../bower_components/wyrm/sass/wyrm_core/_wy_variables.sass","../../../sass/_theme_variables.sass","../../../bower_components/neat/app/assets/stylesheets/grid/_media.scss"], -"names": [], -"file": "badge_only.css" -} diff --git a/docs/_static/css/pytorch_theme.css b/docs/_static/css/pytorch_theme.css deleted file mode 100644 index 31ba06911b7b..000000000000 --- a/docs/_static/css/pytorch_theme.css +++ /dev/null @@ -1,114 +0,0 @@ -body { - font-family: "Lato","proxima-nova","Helvetica Neue",Arial,sans-serif; -} - -/* Default header fonts are ugly */ -h1, h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend, p.caption { - font-family: "Lato","proxima-nova","Helvetica Neue",Arial,sans-serif; -} - -/* Use white for docs background */ -.wy-side-nav-search { - background-color: #fff; -} - -.wy-nav-content-wrap, .wy-menu li.current > a { - background-color: #fff; -} - -@media screen and (min-width: 1400px) { - .wy-nav-content-wrap { - background-color: rgba(0, 0, 0, 0.0470588); - } - - .wy-nav-content { - background-color: #fff; - } -} - -/* Fixes for mobile */ 
-.wy-nav-top { - background-color: #fff; - background-image: url('../img/pytorch-logo-dark.svg'); - background-repeat: no-repeat; - background-position: center; - padding: 0; - margin: 0.4045em 0.809em; - color: #333; -} - -.wy-nav-top > a { - display: none; -} - -@media screen and (max-width: 768px) { - .wy-side-nav-search>a img.logo { - height: 60px; - } -} - -/* This is needed to ensure that logo above search scales properly */ -.wy-side-nav-search a { - display: block; -} - -/* This ensures that multiple constructors will remain in separate lines. */ -.rst-content dl:not(.docutils) dt { - display: table; -} - -/* Use our red for literals (it's very similar to the original color) */ -.rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal { - color: #F05732; -} - -.rst-content tt.xref, a .rst-content tt, .rst-content tt.xref, -.rst-content code.xref, a .rst-content tt, a .rst-content code { - color: #404040; -} - -/* Change link colors (except for the menu) */ - -a { - color: #F05732; -} - -a:hover { - color: #F05732; -} - - -a:visited { - color: #D44D2C; -} - -.wy-menu a { - color: #b3b3b3; -} - -.wy-menu a:hover { - color: #b3b3b3; -} - -/* Default footer text is quite big */ -footer { - font-size: 80%; -} - -footer .rst-footer-buttons { - font-size: 125%; /* revert footer settings - 1/80% = 125% */ -} - -footer p { - font-size: 100%; -} - -/* For hidden headers that appear in TOC tree */ -/* see http://stackoverflow.com/a/32363545/3343043 */ -.rst-content .hidden-section { - display: none; -} - -nav .hidden-section { - display: inherit; -} diff --git a/docs/_static/css/theme.css b/docs/_static/css/theme.css deleted file mode 100644 index 4ffd7f9fbbe2..000000000000 --- a/docs/_static/css/theme.css +++ /dev/null @@ -1,5 +0,0 @@ 
-*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}audio:not([controls]){display:none}[hidden]{display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:hover,a:active{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:bold}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;color:#000;text-decoration:none}mark{background:#ff0;color:#000;font-style:italic;font-weight:bold}pre,code,.rst-content tt,.rst-content code,kbd,samp{font-family:monospace,serif;_font-family:"courier new",monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:before,q:after{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}ul,ol,dl{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure{margin:0}form{margin:0}fieldset{border:0;margin:0;padding:0}label{cursor:pointer}legend{border:0;*margin-left:-7px;padding:0;white-space:normal}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0;*width:13px;*height:13px}input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}input[type="search"]::-webkit-search-decoration,in
put[type="search"]::-webkit-search-cancel-button{-webkit-appearance:none}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}textarea{overflow:auto;vertical-align:top;resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:0.2em 0;background:#ccc;color:#000;padding:0.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none !important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{html,body,section{background:none !important}*{box-shadow:none !important;text-shadow:none !important;filter:none !important;-ms-filter:none !important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,.rst-content .toctree-wrapper p.caption,h3{orphans:3;widows:3}h2,.rst-content .toctree-wrapper p.caption,h3{page-break-after:avoid}}.fa:before,.wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.rst-content .admonition-title:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content dl dt .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content 
tt.download span:first-child:before,.rst-content code.download span:first-child:before,.icon:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-alert,.rst-content .note,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .warning,.rst-content .seealso,.rst-content .admonition-todo,.btn,input[type="text"],input[type="password"],input[type="email"],input[type="url"],input[type="date"],input[type="month"],input[type="time"],input[type="datetime"],input[type="datetime-local"],input[type="week"],input[type="number"],input[type="search"],input[type="tel"],input[type="color"],select,textarea,.wy-menu-vertical li.on a,.wy-menu-vertical li.current>a,.wy-side-nav-search>a,.wy-side-nav-search .wy-dropdown>a,.wy-nav-top a{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}/*! 
- * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome - * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) - */@font-face{font-family:'FontAwesome';src:url("../fonts/fontawesome-webfont.eot?v=4.7.0");src:url("../fonts/fontawesome-webfont.eot?#iefix&v=4.7.0") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff2?v=4.7.0") format("woff2"),url("../fonts/fontawesome-webfont.woff?v=4.7.0") format("woff"),url("../fonts/fontawesome-webfont.ttf?v=4.7.0") format("truetype"),url("../fonts/fontawesome-webfont.svg?v=4.7.0#fontawesomeregular") format("svg");font-weight:normal;font-style:normal}.fa,.wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,.rst-content .admonition-title,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content tt.download span:first-child,.rst-content code.download span:first-child,.icon{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:solid 0.08em #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.wy-menu-vertical li span.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a 
span.fa-pull-left.toctree-expand,.wy-menu-vertical li.current>a span.fa-pull-left.toctree-expand,.rst-content .fa-pull-left.admonition-title,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content dl dt .fa-pull-left.headerlink,.rst-content p.caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.rst-content code.download span.fa-pull-left:first-child,.fa-pull-left.icon{margin-right:.3em}.fa.fa-pull-right,.wy-menu-vertical li span.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-right.toctree-expand,.wy-menu-vertical li.current>a span.fa-pull-right.toctree-expand,.rst-content .fa-pull-right.admonition-title,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content dl dt .fa-pull-right.headerlink,.rst-content p.caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.rst-content code.download span.fa-pull-right:first-child,.fa-pull-right.icon{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.wy-menu-vertical li span.pull-left.toctree-expand,.wy-menu-vertical li.on a span.pull-left.toctree-expand,.wy-menu-vertical li.current>a span.pull-left.toctree-expand,.rst-content .pull-left.admonition-title,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content dl dt .pull-left.headerlink,.rst-content p.caption .pull-left.headerlink,.rst-content tt.download 
span.pull-left:first-child,.rst-content code.download span.pull-left:first-child,.pull-left.icon{margin-right:.3em}.fa.pull-right,.wy-menu-vertical li span.pull-right.toctree-expand,.wy-menu-vertical li.on a span.pull-right.toctree-expand,.wy-menu-vertical li.current>a span.pull-right.toctree-expand,.rst-content .pull-right.admonition-title,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content dl dt .pull-right.headerlink,.rst-content p.caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.rst-content code.download span.pull-right:first-child,.pull-right.icon{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 1);transform:scale(-1, 
1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root .fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-remove:before,.fa-close:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-gear:before,.fa-cog:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content tt.download span:first-child:before,.rst-content code.download 
span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-rotate-right:before,.fa-repeat:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content
:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.rst-content .admonition-title:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-warning:before,.fa-exclamation-triangle:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-gears:before,.fa-cogs:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before
{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-save:before,.fa-floppy-o:before{content:""}.fa-square:before{content:""}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magi
c:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.wy-dropdown .caret:before,.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-unsorted:before,.fa-sort:before{content:""}.fa-sort-down:before,.fa-sort-desc:before{content:""}.fa-sort-up:before,.fa-sort-asc:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-legal:before,.fa-gavel:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-flash:before,.fa-bolt:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-paste:before,.fa-clipboard:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-l
aptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-unlink:before,.fa-chain-broken:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:
""}.fa-minus-square-o:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:""}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:""}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:""}.fa-euro:before,.fa-eur:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-rupee:before,.fa-inr:before{content:""}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:""}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:""}.fa-won:before,.fa-krw:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple
:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vimeo-square:before{content:""}.fa-turkish-lira:before,.fa-try:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li span.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-institution:before,.fa-bank:before,.fa-university:before{content:""}.fa-mortar-board:before,.fa-graduation-cap:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:bef
ore{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:""}.fa-file-zip-o:before,.fa-file-archive-o:before{content:""}.fa-file-sound-o:before,.fa-file-audio-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-resistance:before,.fa-rebel:before{content:""}.fa-ge:before,.fa-empire:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-send:before,.fa-paper-plane:before{content:""}.fa-send-o:before,.fa-paper-plane-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}
.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-st
roke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-hotel:before,.fa-bed:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-yc:before,.fa-y-combinator:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery:before,.fa-battery-full:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-registered:before{content:""}.fa-creative-commons:befor
e{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-tv:before,.fa-television:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}
.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-asl-interpreting:before,.fa-american-sign-language-interpreting:before{content:""}.fa-deafness:before,.fa-hard-of-hearing:before,.fa-deaf:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-signing:before,.fa-sign-language:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-vcard:before,.fa-address-card:before{content:""}.fa-vcard-o:before,.fa-address-card-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{content:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer:before,.fa-thermometer-full:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{con
tent:""}.fa-bathtub:before,.fa-s15:before,.fa-bath:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0, 0, 0, 0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,.rst-content .admonition-title,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content tt.download span:first-child,.rst-content code.download span:first-child,.icon,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context{font-family:inherit}.fa:before,.wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.rst-content .admonition-title:before,.rst-content h1 .headerlink:before,.rst-content h2 
.headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content dl dt .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before,.icon:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before{font-family:"FontAwesome";display:inline-block;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa,a .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,a .rst-content .admonition-title,.rst-content a .admonition-title,a .rst-content h1 .headerlink,.rst-content h1 a .headerlink,a .rst-content h2 .headerlink,.rst-content h2 a .headerlink,a .rst-content h3 .headerlink,.rst-content h3 a .headerlink,a .rst-content h4 .headerlink,.rst-content h4 a .headerlink,a .rst-content h5 .headerlink,.rst-content h5 a .headerlink,a .rst-content h6 .headerlink,.rst-content h6 a .headerlink,a .rst-content dl dt .headerlink,.rst-content dl dt a .headerlink,a .rst-content p.caption .headerlink,.rst-content p.caption a .headerlink,a .rst-content tt.download span:first-child,.rst-content tt.download a span:first-child,a .rst-content code.download span:first-child,.rst-content code.download a span:first-child,a .icon{display:inline-block;text-decoration:inherit}.btn .fa,.btn .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .btn span.toctree-expand,.btn .wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.on a .btn span.toctree-expand,.btn .wy-menu-vertical 
li.current>a span.toctree-expand,.wy-menu-vertical li.current>a .btn span.toctree-expand,.btn .rst-content .admonition-title,.rst-content .btn .admonition-title,.btn .rst-content h1 .headerlink,.rst-content h1 .btn .headerlink,.btn .rst-content h2 .headerlink,.rst-content h2 .btn .headerlink,.btn .rst-content h3 .headerlink,.rst-content h3 .btn .headerlink,.btn .rst-content h4 .headerlink,.rst-content h4 .btn .headerlink,.btn .rst-content h5 .headerlink,.rst-content h5 .btn .headerlink,.btn .rst-content h6 .headerlink,.rst-content h6 .btn .headerlink,.btn .rst-content dl dt .headerlink,.rst-content dl dt .btn .headerlink,.btn .rst-content p.caption .headerlink,.rst-content p.caption .btn .headerlink,.btn .rst-content tt.download span:first-child,.rst-content tt.download .btn span:first-child,.btn .rst-content code.download span:first-child,.rst-content code.download .btn span:first-child,.btn .icon,.nav .fa,.nav .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .nav span.toctree-expand,.nav .wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.on a .nav span.toctree-expand,.nav .wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.current>a .nav span.toctree-expand,.nav .rst-content .admonition-title,.rst-content .nav .admonition-title,.nav .rst-content h1 .headerlink,.rst-content h1 .nav .headerlink,.nav .rst-content h2 .headerlink,.rst-content h2 .nav .headerlink,.nav .rst-content h3 .headerlink,.rst-content h3 .nav .headerlink,.nav .rst-content h4 .headerlink,.rst-content h4 .nav .headerlink,.nav .rst-content h5 .headerlink,.rst-content h5 .nav .headerlink,.nav .rst-content h6 .headerlink,.rst-content h6 .nav .headerlink,.nav .rst-content dl dt .headerlink,.rst-content dl dt .nav .headerlink,.nav .rst-content p.caption .headerlink,.rst-content p.caption .nav .headerlink,.nav .rst-content tt.download span:first-child,.rst-content tt.download .nav span:first-child,.nav .rst-content code.download 
span:first-child,.rst-content code.download .nav span:first-child,.nav .icon{display:inline}.btn .fa.fa-large,.btn .wy-menu-vertical li span.fa-large.toctree-expand,.wy-menu-vertical li .btn span.fa-large.toctree-expand,.btn .rst-content .fa-large.admonition-title,.rst-content .btn .fa-large.admonition-title,.btn .rst-content h1 .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.btn .rst-content dl dt .fa-large.headerlink,.rst-content dl dt .btn .fa-large.headerlink,.btn .rst-content p.caption .fa-large.headerlink,.rst-content p.caption .btn .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.rst-content tt.download .btn span.fa-large:first-child,.btn .rst-content code.download span.fa-large:first-child,.rst-content code.download .btn span.fa-large:first-child,.btn .fa-large.icon,.nav .fa.fa-large,.nav .wy-menu-vertical li span.fa-large.toctree-expand,.wy-menu-vertical li .nav span.fa-large.toctree-expand,.nav .rst-content .fa-large.admonition-title,.rst-content .nav .fa-large.admonition-title,.nav .rst-content h1 .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.nav .rst-content dl dt 
.fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.nav .rst-content p.caption .fa-large.headerlink,.rst-content p.caption .nav .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.nav .rst-content code.download span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.nav .fa-large.icon{line-height:0.9em}.btn .fa.fa-spin,.btn .wy-menu-vertical li span.fa-spin.toctree-expand,.wy-menu-vertical li .btn span.fa-spin.toctree-expand,.btn .rst-content .fa-spin.admonition-title,.rst-content .btn .fa-spin.admonition-title,.btn .rst-content h1 .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.btn .rst-content dl dt .fa-spin.headerlink,.rst-content dl dt .btn .fa-spin.headerlink,.btn .rst-content p.caption .fa-spin.headerlink,.rst-content p.caption .btn .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.rst-content tt.download .btn span.fa-spin:first-child,.btn .rst-content code.download span.fa-spin:first-child,.rst-content code.download .btn span.fa-spin:first-child,.btn .fa-spin.icon,.nav .fa.fa-spin,.nav .wy-menu-vertical li span.fa-spin.toctree-expand,.wy-menu-vertical li .nav span.fa-spin.toctree-expand,.nav .rst-content .fa-spin.admonition-title,.rst-content .nav .fa-spin.admonition-title,.nav .rst-content h1 .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.rst-content h3 
.nav .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.nav .rst-content dl dt .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.nav .rst-content p.caption .fa-spin.headerlink,.rst-content p.caption .nav .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.nav .rst-content code.download span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.nav .fa-spin.icon{display:inline-block}.btn.fa:before,.wy-menu-vertical li span.btn.toctree-expand:before,.rst-content .btn.admonition-title:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content dl dt .btn.headerlink:before,.rst-content p.caption .btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.rst-content code.download span.btn:first-child:before,.btn.icon:before{opacity:0.5;-webkit-transition:opacity 0.05s ease-in;-moz-transition:opacity 0.05s ease-in;transition:opacity 0.05s ease-in}.btn.fa:hover:before,.wy-menu-vertical li span.btn.toctree-expand:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content p.caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.rst-content code.download 
span.btn:first-child:hover:before,.btn.icon:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li .btn-mini span.toctree-expand:before,.btn-mini .rst-content .admonition-title:before,.rst-content .btn-mini .admonition-title:before,.btn-mini .rst-content h1 .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.btn-mini .rst-content dl dt .headerlink:before,.rst-content dl dt .btn-mini .headerlink:before,.btn-mini .rst-content p.caption .headerlink:before,.rst-content p.caption .btn-mini .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.rst-content tt.download .btn-mini span:first-child:before,.btn-mini .rst-content code.download span:first-child:before,.rst-content code.download .btn-mini span:first-child:before,.btn-mini .icon:before{font-size:14px;vertical-align:-15%}.wy-alert,.rst-content .note,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .warning,.rst-content .seealso,.rst-content .admonition-todo{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.wy-alert-title,.rst-content .admonition-title{color:#fff;font-weight:bold;display:block;color:#fff;background:#6ab0de;margin:-12px;padding:6px 12px;margin-bottom:12px}.wy-alert.wy-alert-danger,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .danger,.rst-content 
.error,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.tip,.rst-content .wy-alert-danger.warning,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.admonition-todo{background:#fdf3f2}.wy-alert.wy-alert-danger .wy-alert-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .danger .wy-alert-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .danger .admonition-title,.rst-content .error .admonition-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title{background:#f29f97}.wy-alert.wy-alert-warning,.rst-content .wy-alert-warning.note,.rst-content .attention,.rst-content .caution,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.tip,.rst-content .warning,.rst-content .wy-alert-warning.seealso,.rst-content 
.admonition-todo{background:#ffedcc}.wy-alert.wy-alert-warning .wy-alert-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .attention .wy-alert-title,.rst-content .caution .wy-alert-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .admonition-todo .wy-alert-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .attention .admonition-title,.rst-content .caution .admonition-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .warning .admonition-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .admonition-todo .admonition-title{background:#f0b37e}.wy-alert.wy-alert-info,.rst-content .note,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content .wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.rst-content .seealso,.rst-content .wy-alert-info.admonition-todo{background:#e7f2fa}.wy-alert.wy-alert-info .wy-alert-title,.rst-content .note .wy-alert-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error 
.wy-alert-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.rst-content .note .admonition-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .seealso .admonition-title,.rst-content .wy-alert-info.admonition-todo .admonition-title{background:#6ab0de}.wy-alert.wy-alert-success,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.warning,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.admonition-todo{background:#dbfaf4}.wy-alert.wy-alert-success .wy-alert-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .hint .wy-alert-title,.rst-content .important .wy-alert-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content 
.wy-alert-success.admonition-todo .wy-alert-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .hint .admonition-title,.rst-content .important .admonition-title,.rst-content .tip .admonition-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.admonition-todo .admonition-title{background:#1abc9c}.wy-alert.wy-alert-neutral,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.admonition-todo{background:#f3f6f6}.wy-alert.wy-alert-neutral .wy-alert-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-neutral 
.admonition-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .admonition-title{color:#404040;background:#e1e4e5}.wy-alert.wy-alert-neutral a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.admonition-todo a{color:#2980B9}.wy-alert p:last-child,.rst-content .note p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.rst-content .seealso p:last-child,.rst-content .admonition-todo p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0px;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,0.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all 0.3s ease-in;-moz-transition:all 0.3s ease-in;transition:all 0.3s ease-in}.wy-tray-container 
li.wy-tray-item-success{background:#27AE60}.wy-tray-container li.wy-tray-item-info{background:#2980B9}.wy-tray-container li.wy-tray-item-warning{background:#E67E22}.wy-tray-container li.wy-tray-item-danger{background:#E74C3C}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width: 768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px 12px;color:#fff;border:1px solid rgba(0,0,0,0.1);background-color:#27AE60;text-decoration:none;font-weight:normal;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;box-shadow:0px 1px 2px -1px rgba(255,255,255,0.5) inset,0px -2px 0px 0px rgba(0,0,0,0.1) inset;outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all 0.1s linear;-moz-transition:all 0.1s linear;transition:all 0.1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:0px -1px 0px 0px rgba(0,0,0,0.05) inset,0px 2px 0px 0px rgba(0,0,0,0.1) inset;padding:8px 12px 6px 12px}.btn:visited{color:#fff}.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:0.4;cursor:not-allowed;box-shadow:none}.btn-disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = 
false);filter:alpha(opacity=40);opacity:0.4;cursor:not-allowed;box-shadow:none}.btn-disabled:hover,.btn-disabled:focus,.btn-disabled:active{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:0.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980B9 !important}.btn-info:hover{background-color:#2e8ece !important}.btn-neutral{background-color:#f3f6f6 !important;color:#404040 !important}.btn-neutral:hover{background-color:#e5ebeb !important;color:#404040}.btn-neutral:visited{color:#404040 !important}.btn-success{background-color:#27AE60 !important}.btn-success:hover{background-color:#295 !important}.btn-danger{background-color:#E74C3C !important}.btn-danger:hover{background-color:#ea6153 !important}.btn-warning{background-color:#E67E22 !important}.btn-warning:hover{background-color:#e98b39 !important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f !important}.btn-link{background-color:transparent !important;color:#2980B9;box-shadow:none;border-color:transparent !important}.btn-link:hover{background-color:transparent !important;color:#409ad5 !important;box-shadow:none}.btn-link:active{background-color:transparent !important;color:#409ad5 !important;box-shadow:none}.btn-link:visited{color:#9B59B6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:before,.wy-btn-group:after{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:solid 1px #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,0.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 
12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980B9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:solid 1px #cfd7dd;margin:6px 0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type="search"]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980B9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned input,.wy-form-aligned textarea,.wy-form-aligned select,.wy-form-aligned .wy-help-inline,.wy-form-aligned label{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{border:0;margin:0;padding:0}legend{display:block;width:100%;border:0;padding:0;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label{display:block;margin:0 0 .3125em 
0;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;*zoom:1;max-width:68em;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:before,.wy-control-group:after{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group:before,.wy-control-group:after{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#E74C3C}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full input[type="text"],.wy-control-group .wy-form-full input[type="password"],.wy-control-group .wy-form-full input[type="email"],.wy-control-group .wy-form-full input[type="url"],.wy-control-group .wy-form-full input[type="date"],.wy-control-group .wy-form-full input[type="month"],.wy-control-group .wy-form-full input[type="time"],.wy-control-group .wy-form-full input[type="datetime"],.wy-control-group .wy-form-full input[type="datetime-local"],.wy-control-group .wy-form-full input[type="week"],.wy-control-group .wy-form-full input[type="number"],.wy-control-group .wy-form-full input[type="search"],.wy-control-group .wy-form-full input[type="tel"],.wy-control-group .wy-form-full input[type="color"],.wy-control-group .wy-form-halves input[type="text"],.wy-control-group .wy-form-halves input[type="password"],.wy-control-group .wy-form-halves input[type="email"],.wy-control-group .wy-form-halves input[type="url"],.wy-control-group .wy-form-halves input[type="date"],.wy-control-group .wy-form-halves input[type="month"],.wy-control-group .wy-form-halves input[type="time"],.wy-control-group .wy-form-halves input[type="datetime"],.wy-control-group .wy-form-halves 
input[type="datetime-local"],.wy-control-group .wy-form-halves input[type="week"],.wy-control-group .wy-form-halves input[type="number"],.wy-control-group .wy-form-halves input[type="search"],.wy-control-group .wy-form-halves input[type="tel"],.wy-control-group .wy-form-halves input[type="color"],.wy-control-group .wy-form-thirds input[type="text"],.wy-control-group .wy-form-thirds input[type="password"],.wy-control-group .wy-form-thirds input[type="email"],.wy-control-group .wy-form-thirds input[type="url"],.wy-control-group .wy-form-thirds input[type="date"],.wy-control-group .wy-form-thirds input[type="month"],.wy-control-group .wy-form-thirds input[type="time"],.wy-control-group .wy-form-thirds input[type="datetime"],.wy-control-group .wy-form-thirds input[type="datetime-local"],.wy-control-group .wy-form-thirds input[type="week"],.wy-control-group .wy-form-thirds input[type="number"],.wy-control-group .wy-form-thirds input[type="search"],.wy-control-group .wy-form-thirds input[type="tel"],.wy-control-group .wy-form-thirds input[type="color"]{width:100%}.wy-control-group .wy-form-full{float:left;display:block;margin-right:2.35765%;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child{margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(2n+1){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child{margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control{margin:6px 0 0 0;font-size:90%}.wy-control-no-input{display:inline-block;margin:6px 0 0 0;font-size:90%}.wy-control-group.fluid-input 
input[type="text"],.wy-control-group.fluid-input input[type="password"],.wy-control-group.fluid-input input[type="email"],.wy-control-group.fluid-input input[type="url"],.wy-control-group.fluid-input input[type="date"],.wy-control-group.fluid-input input[type="month"],.wy-control-group.fluid-input input[type="time"],.wy-control-group.fluid-input input[type="datetime"],.wy-control-group.fluid-input input[type="datetime-local"],.wy-control-group.fluid-input input[type="week"],.wy-control-group.fluid-input input[type="number"],.wy-control-group.fluid-input input[type="search"],.wy-control-group.fluid-input input[type="tel"],.wy-control-group.fluid-input input[type="color"]{width:100%}.wy-form-message-inline{display:inline-block;padding-left:0.3em;color:#666;vertical-align:middle;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;*overflow:visible}input[type="text"],input[type="password"],input[type="email"],input[type="url"],input[type="date"],input[type="month"],input[type="time"],input[type="datetime"],input[type="datetime-local"],input[type="week"],input[type="number"],input[type="search"],input[type="tel"],input[type="color"]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border 0.3s linear;-moz-transition:border 0.3s linear;transition:border 0.3s linear}input[type="datetime-local"]{padding:.34375em 
.625em}input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type="search"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}input[type="text"]:focus,input[type="password"]:focus,input[type="email"]:focus,input[type="url"]:focus,input[type="date"]:focus,input[type="month"]:focus,input[type="time"]:focus,input[type="datetime"]:focus,input[type="datetime-local"]:focus,input[type="week"]:focus,input[type="number"]:focus,input[type="search"]:focus,input[type="tel"]:focus,input[type="color"]:focus{outline:0;outline:thin dotted \9;border-color:#333}input.no-focus:focus{border-color:#ccc !important}input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:1px auto #129FEA}input[type="text"][disabled],input[type="password"][disabled],input[type="email"][disabled],input[type="url"][disabled],input[type="date"][disabled],input[type="month"][disabled],input[type="time"][disabled],input[type="datetime"][disabled],input[type="datetime-local"][disabled],input[type="week"][disabled],input[type="number"][disabled],input[type="search"][disabled],input[type="tel"][disabled],input[type="color"][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,textarea:focus:invalid,select:focus:invalid{color:#E74C3C;border:1px solid 
#E74C3C}input:focus:invalid:focus,textarea:focus:invalid:focus,select:focus:invalid:focus{border-color:#E74C3C}input[type="file"]:focus:invalid:focus,input[type="radio"]:focus:invalid:focus,input[type="checkbox"]:focus:invalid:focus{outline-color:#E74C3C}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border 0.3s linear;-moz-transition:border 0.3s linear;transition:border 0.3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}select[disabled],textarea[disabled],input[readonly],select[readonly],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type="radio"][disabled],input[type="checkbox"][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:solid 1px #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{position:absolute;content:"";display:block;left:0;top:0;width:36px;height:12px;border-radius:4px;background:#ccc;-webkit-transition:all 0.2s ease-in-out;-moz-transition:all 0.2s ease-in-out;transition:all 0.2s 
ease-in-out}.wy-switch:after{position:absolute;content:"";display:block;width:18px;height:18px;border-radius:4px;background:#999;left:-3px;top:-3px;-webkit-transition:all 0.2s ease-in-out;-moz-transition:all 0.2s ease-in-out;transition:all 0.2s ease-in-out}.wy-switch span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27AE60}.wy-switch.disabled{cursor:not-allowed;opacity:0.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#E74C3C}.wy-control-group.wy-control-group-error input[type="text"],.wy-control-group.wy-control-group-error input[type="password"],.wy-control-group.wy-control-group-error input[type="email"],.wy-control-group.wy-control-group-error input[type="url"],.wy-control-group.wy-control-group-error input[type="date"],.wy-control-group.wy-control-group-error input[type="month"],.wy-control-group.wy-control-group-error input[type="time"],.wy-control-group.wy-control-group-error input[type="datetime"],.wy-control-group.wy-control-group-error input[type="datetime-local"],.wy-control-group.wy-control-group-error input[type="week"],.wy-control-group.wy-control-group-error input[type="number"],.wy-control-group.wy-control-group-error input[type="search"],.wy-control-group.wy-control-group-error input[type="tel"],.wy-control-group.wy-control-group-error input[type="color"]{border:solid 1px #E74C3C}.wy-control-group.wy-control-group-error textarea{border:solid 1px #E74C3C}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27AE60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#E74C3C}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#E67E22}.wy-inline-validate.wy-inline-validate-info 
.wy-input-context{color:#2980B9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width: 480px){.wy-form button[type="submit"]{margin:0.7em 0 0}.wy-form input[type="text"],.wy-form input[type="password"],.wy-form input[type="email"],.wy-form input[type="url"],.wy-form input[type="date"],.wy-form input[type="month"],.wy-form input[type="time"],.wy-form input[type="datetime"],.wy-form input[type="datetime-local"],.wy-form input[type="week"],.wy-form input[type="number"],.wy-form input[type="search"],.wy-form input[type="tel"],.wy-form input[type="color"]{margin-bottom:0.3em;display:block}.wy-form label{margin-bottom:0.3em;display:block}.wy-form input[type="password"],.wy-form input[type="email"],.wy-form input[type="url"],.wy-form input[type="date"],.wy-form 
input[type="month"],.wy-form input[type="time"],.wy-form input[type="datetime"],.wy-form input[type="datetime-local"],.wy-form input[type="week"],.wy-form input[type="number"],.wy-form input[type="search"],.wy-form input[type="tel"],.wy-form input[type="color"]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:0.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0 0}.wy-form .wy-help-inline,.wy-form-message-inline,.wy-form-message{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width: 768px){.tablet-hide{display:none}}@media screen and (max-width: 480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.wy-table,.rst-content table.docutils,.rst-content table.field-list{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.wy-table caption,.rst-content table.docutils caption,.rst-content table.field-list caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.wy-table td,.rst-content table.docutils td,.rst-content table.field-list td,.wy-table th,.rst-content table.docutils th,.rst-content table.field-list th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.wy-table td:first-child,.rst-content table.docutils td:first-child,.rst-content table.field-list td:first-child,.wy-table th:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list th:first-child{border-left-width:0}.wy-table thead,.rst-content table.docutils thead,.rst-content table.field-list thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.wy-table thead th,.rst-content table.docutils thead th,.rst-content table.field-list thead th{font-weight:bold;border-bottom:solid 2px #e1e4e5}.wy-table td,.rst-content table.docutils td,.rst-content table.field-list td{background-color:transparent;vertical-align:middle}.wy-table td p,.rst-content table.docutils td p,.rst-content 
table.field-list td p{line-height:18px}.wy-table td p:last-child,.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child{margin-bottom:0}.wy-table .wy-table-cell-min,.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min{width:1%;padding-right:0}.wy-table .wy-table-cell-min input[type=checkbox],.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox],.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:gray;font-size:90%}.wy-table-tertiary{color:gray;font-size:80%}.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td,.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td{background-color:#f3f6f6}.wy-table-backed{background-color:#f3f6f6}.wy-table-bordered-all,.rst-content table.docutils{border:1px solid #e1e4e5}.wy-table-bordered-all td,.rst-content table.docutils td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.wy-table-bordered-all tbody>tr:last-child td,.rst-content table.docutils tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px 0;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive table{margin-bottom:0 !important}.wy-table-responsive table td,.wy-table-responsive table 
th{white-space:nowrap}a{color:#2980B9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9B59B6}html{height:100%;overflow-x:hidden}body{font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;font-weight:normal;color:#404040;min-height:100%;overflow-x:hidden;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#E67E22 !important}a.wy-text-warning:hover{color:#eb9950 !important}.wy-text-info{color:#2980B9 !important}a.wy-text-info:hover{color:#409ad5 !important}.wy-text-success{color:#27AE60 !important}a.wy-text-success:hover{color:#36d278 !important}.wy-text-danger{color:#E74C3C !important}a.wy-text-danger:hover{color:#ed7669 !important}.wy-text-neutral{color:#404040 !important}a.wy-text-neutral:hover{color:#595959 !important}h1,h2,.rst-content .toctree-wrapper p.caption,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:"Roboto Slab","ff-tisa-web-pro","Georgia",Arial,sans-serif}p{line-height:24px;margin:0;font-size:16px;margin-bottom:24px}h1{font-size:175%}h2,.rst-content .toctree-wrapper p.caption{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}code,.rst-content tt,.rst-content code{white-space:nowrap;max-width:100%;background:#fff;border:solid 1px #e1e4e5;font-size:75%;padding:0 5px;font-family:Consolas,"Andale Mono WT","Andale Mono","Lucida Console","Lucida Sans Typewriter","DejaVu Sans Mono","Bitstream Vera Sans Mono","Liberation Mono","Nimbus Mono L",Monaco,"Courier New",Courier,monospace;color:#E74C3C;overflow-x:auto}code.code-large,.rst-content tt.code-large{font-size:90%}.wy-plain-list-disc,.rst-content .section ul,.rst-content .toctree-wrapper ul,article 
ul{list-style:disc;line-height:24px;margin-bottom:24px}.wy-plain-list-disc li,.rst-content .section ul li,.rst-content .toctree-wrapper ul li,article ul li{list-style:disc;margin-left:24px}.wy-plain-list-disc li p:last-child,.rst-content .section ul li p:last-child,.rst-content .toctree-wrapper ul li p:last-child,article ul li p:last-child{margin-bottom:0}.wy-plain-list-disc li ul,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li ul,article ul li ul{margin-bottom:0}.wy-plain-list-disc li li,.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,article ul li li{list-style:circle}.wy-plain-list-disc li li li,.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,article ul li li li{list-style:square}.wy-plain-list-disc li ol li,.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,article ul li ol li{list-style:decimal}.wy-plain-list-decimal,.rst-content .section ol,.rst-content ol.arabic,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.wy-plain-list-decimal li,.rst-content .section ol li,.rst-content ol.arabic li,article ol li{list-style:decimal;margin-left:24px}.wy-plain-list-decimal li p:last-child,.rst-content .section ol li p:last-child,.rst-content ol.arabic li p:last-child,article ol li p:last-child{margin-bottom:0}.wy-plain-list-decimal li ul,.rst-content .section ol li ul,.rst-content ol.arabic li ul,article ol li ul{margin-bottom:0}.wy-plain-list-decimal li ul li,.rst-content .section ol li ul li,.rst-content ol.arabic li ul li,article ol li ul li{list-style:disc}.codeblock-example{border:1px solid #e1e4e5;border-bottom:none;padding:24px;padding-top:48px;font-weight:500;background:#fff;position:relative}.codeblock-example:after{content:"Example";position:absolute;top:0px;left:0px;background:#9B59B6;color:#fff;padding:6px 12px}.codeblock-example.prettyprint-example-only{border:1px solid #e1e4e5;margin-bottom:24px}.codeblock,pre.literal-block,.rst-content 
/* Section: code-block presentation. Bordered white containers for
   pre/literal-block/div[class^='highlight'], monospace font stacks for the
   line-number gutter (.linenodiv) and code pane, and a print override that
   switches to pre-wrap so long lines don't get clipped on paper. The short
   one/two-letter classes that follow (.c comment, .k keyword, .s string,
   .nf function name, .hll highlighted line, etc.) match Pygments' token
   class names — presumably generated from a Pygments style. Also breadcrumb
   styling (.wy-breadcrumbs) with small-screen/print hiding of the aside, and
   the horizontal menu (*zoom:1 is the IE star-property clearfix hack). */
.literal-block,.rst-content pre.literal-block,div[class^='highlight']{border:1px solid #e1e4e5;padding:0px;overflow-x:auto;background:#fff;margin:1px 0 24px 0}.codeblock div[class^='highlight'],pre.literal-block div[class^='highlight'],.rst-content .literal-block div[class^='highlight'],div[class^='highlight'] div[class^='highlight']{border:none;background:none;margin:0}div[class^='highlight'] td.code{width:100%}.linenodiv pre{border-right:solid 1px #e6e9ea;margin:0;padding:12px 12px;font-family:Consolas,"Andale Mono WT","Andale Mono","Lucida Console","Lucida Sans Typewriter","DejaVu Sans Mono","Bitstream Vera Sans Mono","Liberation Mono","Nimbus Mono L",Monaco,"Courier New",Courier,monospace;font-size:12px;line-height:1.5;color:#d9d9d9}div[class^='highlight'] pre{white-space:pre;margin:0;padding:12px 12px;font-family:Consolas,"Andale Mono WT","Andale Mono","Lucida Console","Lucida Sans Typewriter","DejaVu Sans Mono","Bitstream Vera Sans Mono","Liberation Mono","Nimbus Mono L",Monaco,"Courier New",Courier,monospace;font-size:12px;line-height:1.5;display:block;overflow:auto;color:#404040}@media print{.codeblock,pre.literal-block,.rst-content .literal-block,.rst-content pre.literal-block,div[class^='highlight'],div[class^='highlight'] pre{white-space:pre-wrap}}.hll{background-color:#ffc;margin:0 -12px;padding:0 12px;display:block}.c{color:#998;font-style:italic}.err{color:#a61717;background-color:#e3d2d2}.k{font-weight:bold}.o{font-weight:bold}.cm{color:#998;font-style:italic}.cp{color:#999;font-weight:bold}.c1{color:#998;font-style:italic}.cs{color:#999;font-weight:bold;font-style:italic}.gd{color:#000;background-color:#fdd}.gd .x{color:#000;background-color:#faa}.ge{font-style:italic}.gr{color:#a00}.gh{color:#999}.gi{color:#000;background-color:#dfd}.gi 
.x{color:#000;background-color:#afa}.go{color:#888}.gp{color:#555}.gs{font-weight:bold}.gu{color:purple;font-weight:bold}.gt{color:#a00}.kc{font-weight:bold}.kd{font-weight:bold}.kn{font-weight:bold}.kp{font-weight:bold}.kr{font-weight:bold}.kt{color:#458;font-weight:bold}.m{color:#099}.s{color:#d14}.n{color:#333}.na{color:teal}.nb{color:#0086b3}.nc{color:#458;font-weight:bold}.no{color:teal}.ni{color:purple}.ne{color:#900;font-weight:bold}.nf{color:#900;font-weight:bold}.nn{color:#555}.nt{color:navy}.nv{color:teal}.ow{font-weight:bold}.w{color:#bbb}.mf{color:#099}.mh{color:#099}.mi{color:#099}.mo{color:#099}.sb{color:#d14}.sc{color:#d14}.sd{color:#d14}.s2{color:#d14}.se{color:#d14}.sh{color:#d14}.si{color:#d14}.sx{color:#d14}.sr{color:#009926}.s1{color:#d14}.ss{color:#990073}.bp{color:#999}.vc{color:teal}.vg{color:teal}.vi{color:teal}.il{color:#099}.gc{color:#999;background-color:#EAF2F5}.wy-breadcrumbs li{display:inline-block}.wy-breadcrumbs li.wy-breadcrumbs-aside{float:right}.wy-breadcrumbs li a{display:inline-block;padding:5px}.wy-breadcrumbs li a:first-child{padding-left:0}.wy-breadcrumbs li code,.wy-breadcrumbs li .rst-content tt,.rst-content .wy-breadcrumbs li tt{padding:5px;border:none;background:none}.wy-breadcrumbs li code.literal,.wy-breadcrumbs li .rst-content tt.literal,.rst-content .wy-breadcrumbs li tt.literal{color:#404040}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width: 480px){.wy-breadcrumbs-extra{display:none}.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:before,.wy-menu-horiz:after{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz ul,.wy-menu-horiz li{display:inline-block}.wy-menu-horiz li:hover{background:rgba(255,255,255,0.1)}.wy-menu-horiz 
/* Section: the 300px-wide vertical sidebar menu (.wy-menu-vertical) and its
   toctree nesting. Nested <ul>s are hidden by default (li ul{display:none})
   and revealed on li.current; each deeper toctree level (l2/l3/l4) gets a
   progressively lighter background and larger left padding to convey depth.
   NOTE(review): "padding-left:1.618em -4px" in the li.on/li.current>a rule is
   an invalid two-value padding-left — browsers drop the declaration. Looks
   like an uncompiled SASS subtraction; harmless here (the shorthand before it
   wins), but worth fixing upstream in the theme source, not in this minified
   build output. */
li.divide-left{border-left:solid 1px #404040}.wy-menu-horiz li.divide-right{border-right:solid 1px #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{height:32px;display:inline-block;line-height:32px;padding:0 1.618em;margin-bottom:0;display:block;font-weight:bold;text-transform:uppercase;font-size:80%;color:#6f6f6f;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:solid 1px #404040}.wy-menu-vertical li.divide-bottom{border-bottom:solid 1px #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:gray;border-right:solid 1px #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.wy-menu-vertical li code,.wy-menu-vertical li .rst-content tt,.rst-content .wy-menu-vertical li tt{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li span.toctree-expand{display:block;float:left;margin-left:-1.2em;font-size:.8em;line-height:1.6em;color:#4d4d4d}.wy-menu-vertical li.on a,.wy-menu-vertical li.current>a{color:#404040;padding:.4045em 1.618em;font-weight:bold;position:relative;background:#fcfcfc;border:none;border-bottom:solid 1px #c9c9c9;border-top:solid 1px #c9c9c9;padding-left:1.618em -4px}.wy-menu-vertical li.on a:hover,.wy-menu-vertical li.current>a:hover{background:#fcfcfc}.wy-menu-vertical li.on a:hover span.toctree-expand,.wy-menu-vertical li.current>a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand{display:block;font-size:.8em;line-height:1.6em;color:#333}.wy-menu-vertical li.toctree-l1.current li.toctree-l2>ul,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>ul{display:none}.wy-menu-vertical li.toctree-l1.current li.toctree-l2.current>ul,.wy-menu-vertical li.toctree-l2.current 
li.toctree-l3.current>ul{display:block}.wy-menu-vertical li.toctree-l2.current>a{background:#c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{display:block;background:#c9c9c9;padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l2 a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.toctree-l2 span.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3{font-size:.9em}.wy-menu-vertical li.toctree-l3.current>a{background:#bdbdbd;padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{display:block;background:#bdbdbd;padding:.4045em 5.663em;border-top:none;border-bottom:none}.wy-menu-vertical li.toctree-l3 a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.toctree-l3 span.toctree-expand{color:#969696}.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical .local-toc li ul{display:block}.wy-menu-vertical li ul li a{margin-bottom:0;color:#b3b3b3;font-weight:normal}.wy-menu-vertical a{display:inline-block;line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#b3b3b3}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover span.toctree-expand{color:#b3b3b3}.wy-menu-vertical a:active{background-color:#2980B9;cursor:pointer;color:#fff}.wy-menu-vertical a:active span.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980B9;text-align:center;padding:.809em;display:block;color:#fcfcfc;margin-bottom:.809em}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em auto;height:45px;width:45px;background-color:#2980B9;padding:5px;border-radius:100%}.wy-side-nav-search>a,.wy-side-nav-search 
/* Section: sidebar search header (.wy-side-nav-search: blue box, rounded
   text input, project logo/version), nested-menu slide animation via the
   [data-menu-wrap] attribute and its .move-left/.move-center/.move-right
   states (0.2s opacity/position transition, vendor-prefixed), and the page
   scaffolding: .wy-body-for-nav's background is split across two lines of a
   single declaration — "left repeat-y" here, the #fcfcfc color and a
   background-image data URI on the next line. */
.wy-dropdown>a{color:#fcfcfc;font-size:100%;font-weight:bold;display:inline-block;padding:4px 6px;margin-bottom:.809em}.wy-side-nav-search>a:hover,.wy-side-nav-search .wy-dropdown>a:hover{background:rgba(255,255,255,0.1)}.wy-side-nav-search>a img.logo,.wy-side-nav-search .wy-dropdown>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search>a.icon img.logo,.wy-side-nav-search .wy-dropdown>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:normal;color:rgba(255,255,255,0.3)}.wy-nav .wy-menu-vertical header{color:#2980B9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980B9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:left repeat-y 
/* inline base64 PNG, tiled vertically and sized 300px x 1px — paints the
   fixed 300px sidebar column behind the content; do not reformat the URI */
#fcfcfc;background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyRpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMy1jMDExIDY2LjE0NTY2MSwgMjAxMi8wMi8wNi0xNDo1NjoyNyAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNiAoTWFjaW50b3NoKSIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDoxOERBMTRGRDBFMUUxMUUzODUwMkJCOThDMEVFNURFMCIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDoxOERBMTRGRTBFMUUxMUUzODUwMkJCOThDMEVFNURFMCI+IDx4bXBNTTpEZXJpdmVkRnJvbSBzdFJlZjppbnN0YW5jZUlEPSJ4bXAuaWlkOjE4REExNEZCMEUxRTExRTM4NTAyQkI5OEMwRUU1REUwIiBzdFJlZjpkb2N1bWVudElEPSJ4bXAuZGlkOjE4REExNEZDMEUxRTExRTM4NTAyQkI5OEMwRUU1REUwIi8+IDwvcmRmOkRlc2NyaXB0aW9uPiA8L3JkZjpSREY+IDwveDp4bXBtZXRhPiA8P3hwYWNrZXQgZW5kPSJyIj8+EwrlwAAAAA5JREFUeNpiMDU0BAgwAAE2AJgB9BnaAAAAAElFTkSuQmCC);background-size:300px 1px}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980B9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:before,.wy-nav-top:after{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:bold}.wy-nav-top 
img{margin-right:12px;height:45px;width:45px;background-color:#2980B9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,0.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:gray}footer p{margin-bottom:12px}footer span.commit code,footer span.commit .rst-content tt,.rst-content footer span.commit tt{padding:0px;font-family:Consolas,"Andale Mono WT","Andale Mono","Lucida Console","Lucida Sans Typewriter","DejaVu Sans Mono","Bitstream Vera Sans Mono","Liberation Mono","Nimbus Mono L",Monaco,"Courier New",Courier,monospace;font-size:1em;background:none;border:none;color:gray}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:before,.rst-footer-buttons:after{width:100%}.rst-footer-buttons:before,.rst-footer-buttons:after{display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:before,.rst-breadcrumbs-buttons:after{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:solid 1px #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:solid 1px #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:gray;font-size:90%}@media screen and (max-width: 768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-side-scroll{width:auto}.wy-side-nav-search{width:auto}.wy-menu.wy-menu-vertical{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap 
.wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width: 1400px){.wy-nav-content-wrap{background:rgba(0,0,0,0.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,footer,.wy-nav-side{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;border-top:solid 10px #343131;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-content 
h6 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content p.caption .headerlink,.rst-content p.caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .icon{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge 
.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content img{max-width:100%;height:auto !important}.rst-content .highlight>pre,.rst-content .linenodiv>pre{line-height:normal}.rst-content div.figure{margin-bottom:24px}.rst-content div.figure p.caption{font-style:italic}.rst-content div.figure.align-center{text-align:center}.rst-content .section>img,.rst-content .section>a>img{margin-bottom:24px}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content .note .last,.rst-content .attention .last,.rst-content .caution .last,.rst-content .danger .last,.rst-content .error .last,.rst-content .hint .last,.rst-content .important .last,.rst-content .tip .last,.rst-content .warning .last,.rst-content .seealso .last,.rst-content .admonition-todo .last{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,0.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent !important;border-color:rgba(0,0,0,0.1) !important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha li{list-style:upper-alpha}.rst-content .section ol p,.rst-content .section ul p{margin-bottom:12px}.rst-content .line-block{margin-left:24px}.rst-content .topic-title{font-weight:bold;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0px 0px 24px 24px}.rst-content .align-left{float:left;margin:0px 24px 24px 0px}.rst-content .align-center{margin:auto;display:block}.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content .toctree-wrapper p.caption .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 
.headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink{display:none;visibility:hidden;font-size:14px}.rst-content h1 .headerlink:after,.rst-content h2 .headerlink:after,.rst-content .toctree-wrapper p.caption .headerlink:after,.rst-content h3 .headerlink:after,.rst-content h4 .headerlink:after,.rst-content h5 .headerlink:after,.rst-content h6 .headerlink:after,.rst-content dl dt .headerlink:after,.rst-content p.caption .headerlink:after{visibility:visible;content:"";font-family:FontAwesome;display:inline-block}.rst-content h1:hover .headerlink,.rst-content h2:hover .headerlink,.rst-content .toctree-wrapper p.caption:hover .headerlink,.rst-content h3:hover .headerlink,.rst-content h4:hover .headerlink,.rst-content h5:hover .headerlink,.rst-content h6:hover .headerlink,.rst-content dl dt:hover .headerlink,.rst-content p.caption:hover .headerlink{display:inline-block}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:solid 1px #e1e4e5}.rst-content .sidebar p,.rst-content .sidebar ul,.rst-content .sidebar dl{font-size:90%}.rst-content .sidebar .last{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:"Roboto Slab","ff-tisa-web-pro","Georgia",Arial,sans-serif;font-weight:bold;background:#e1e4e5;padding:6px 12px;margin:-24px;margin-bottom:24px;font-size:100%}.rst-content .highlighted{background:#F1C40F;display:inline-block;font-weight:bold;padding:0 6px}.rst-content .footnote-reference,.rst-content .citation-reference{vertical-align:super;font-size:90%}.rst-content table.docutils.citation,.rst-content table.docutils.footnote{background:none;border:none;color:gray}.rst-content table.docutils.citation td,.rst-content table.docutils.citation tr,.rst-content table.docutils.footnote td,.rst-content table.docutils.footnote tr{border:none;background-color:transparent 
!important;white-space:normal}.rst-content table.docutils.citation td.label,.rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}.rst-content table.docutils.citation tt,.rst-content table.docutils.citation code,.rst-content table.docutils.footnote tt,.rst-content table.docutils.footnote code{color:#555}.rst-content table.field-list{border:none}.rst-content table.field-list td{border:none;padding-top:5px}.rst-content table.field-list td>strong{display:inline-block;margin-top:3px}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left;padding-left:0}.rst-content tt,.rst-content tt,.rst-content code{color:#000;padding:2px 5px}.rst-content tt big,.rst-content tt em,.rst-content tt big,.rst-content code big,.rst-content tt em,.rst-content code em{font-size:100% !important;line-height:normal}.rst-content tt.literal,.rst-content tt.literal,.rst-content code.literal{color:#E74C3C}.rst-content tt.xref,a .rst-content tt,.rst-content tt.xref,.rst-content code.xref,a .rst-content tt,a .rst-content code{font-weight:bold;color:#404040}.rst-content a tt,.rst-content a tt,.rst-content a code{color:#2980B9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:bold}.rst-content dl p,.rst-content dl table,.rst-content dl ul,.rst-content dl ol{margin-bottom:12px !important}.rst-content dl dd{margin:0 0 12px 24px}.rst-content dl:not(.docutils){margin-bottom:24px}.rst-content dl:not(.docutils) dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980B9;border-top:solid 3px #6ab0de;padding:6px;position:relative}.rst-content dl:not(.docutils) dt:before{color:#6ab0de}.rst-content dl:not(.docutils) dt .headerlink{color:#404040;font-size:100% !important}.rst-content dl:not(.docutils) dl dt{margin-bottom:6px;border:none;border-left:solid 3px #ccc;background:#f0f0f0;color:#555}.rst-content dl:not(.docutils) 
dl dt .headerlink{color:#404040;font-size:100% !important}.rst-content dl:not(.docutils) dt:first-child{margin-top:0}.rst-content dl:not(.docutils) tt,.rst-content dl:not(.docutils) tt,.rst-content dl:not(.docutils) code{font-weight:bold}.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) code.descclassname{background-color:transparent;border:none;padding:0;font-size:100% !important}.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname{font-weight:bold}.rst-content dl:not(.docutils) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:bold}.rst-content dl:not(.docutils) .property{display:inline-block;padding-right:8px}.rst-content .viewcode-link,.rst-content .viewcode-back{display:inline-block;color:#27AE60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:bold}.rst-content tt.download,.rst-content code.download{background:inherit;padding:inherit;font-weight:normal;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content tt.download span:first-child,.rst-content code.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before{margin-right:4px}.rst-content .guilabel{border:1px solid #7fbbe3;background:#e7f2fa;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .versionmodified{font-style:italic}@media screen and (max-width: 480px){.rst-content 
.sidebar{width:100%}}span[id*='MathJax-Span']{color:#404040}.math{text-align:center}@font-face{font-family:"Inconsolata";font-style:normal;font-weight:400;src:local("Inconsolata"),local("Inconsolata-Regular"),url(../fonts/Inconsolata-Regular.ttf) format("truetype")}@font-face{font-family:"Inconsolata";font-style:normal;font-weight:700;src:local("Inconsolata Bold"),local("Inconsolata-Bold"),url(../fonts/Inconsolata-Bold.ttf) format("truetype")}@font-face{font-family:"Lato";font-style:normal;font-weight:400;src:local("Lato Regular"),local("Lato-Regular"),url(../fonts/Lato-Regular.ttf) format("truetype")}@font-face{font-family:"Lato";font-style:normal;font-weight:700;src:local("Lato Bold"),local("Lato-Bold"),url(../fonts/Lato-Bold.ttf) format("truetype")}@font-face{font-family:"Roboto Slab";font-style:normal;font-weight:400;src:local("Roboto Slab Regular"),local("RobotoSlab-Regular"),url(../fonts/RobotoSlab-Regular.ttf) format("truetype")}@font-face{font-family:"Roboto Slab";font-style:normal;font-weight:700;src:local("Roboto Slab Bold"),local("RobotoSlab-Bold"),url(../fonts/RobotoSlab-Bold.ttf) format("truetype")} -/*# sourceMappingURL=theme.css.map */ diff --git a/docs/_static/css/theme.css.map b/docs/_static/css/theme.css.map deleted file mode 100644 index 6a2e96faa073..000000000000 --- a/docs/_static/css/theme.css.map +++ /dev/null @@ -1,7 +0,0 @@ -{ -"version": 3, -"mappings": 
"CACE,AAAE,ECQI,iBAAoB,EDPJ,SAAU,ECY1B,cAAiB,EDZD,SAAU,EC2B1B,SAAY,ED3BI,SAAU,EEFlC,uEAAiF,EAC/E,MAAO,EAAE,IAAK,EAEhB,iBAAoB,EAClB,MAAO,EAAE,WAAY,EACrB,OAAQ,EAAE,KAAM,EAChB,IAAK,EAAE,AAAC,EAEV,oBAAqB,EACnB,MAAO,EAAE,GAAI,EAEf,OAAQ,EACN,MAAO,EAAE,GAAI,EAEf,AAAC,EDLO,iBAAoB,ECMd,SAAU,EDDhB,cAAiB,ECCX,SAAU,EDchB,SAAY,ECdN,SAAU,EAExB,GAAI,EACF,QAAS,EAAE,GAAI,EACf,uBAAwB,EAAE,GAAI,EAC9B,mBAAoB,EAAE,GAAI,EAE5B,GAAI,EACF,KAAM,EAAE,AAAC,EAEX,eAAiB,EACf,MAAO,EAAE,AAAC,EAEZ,UAAW,EACT,YAAa,EAAE,SAAU,EAE3B,OAAS,EACP,UAAW,EAAE,GAAI,EAEnB,SAAU,EACR,KAAM,EAAE,AAAC,EAEX,EAAG,EACD,SAAU,EAAE,KAAM,EAGpB,EAAG,EACD,SAAU,EAAE,GAAI,EAChB,IAAK,EAAE,GAAI,EACX,cAAe,EAAE,GAAI,EAEvB,GAAI,EACF,SAAU,EAAE,GAAI,EAChB,IAAK,EAAE,GAAI,EACX,SAAU,EAAE,KAAM,EAClB,UAAW,EAAE,GAAI,EAEnB,kDAAoB,EAClB,UAAW,EAAE,cAAgB,EAC7B,WAAY,EAAE,sBAAwB,EACtC,QAAS,EAAE,EAAG,EAEhB,EAAG,EACD,UAAW,EAAE,EAAG,EAElB,AAAC,EACC,KAAM,EAAE,GAAI,EAEd,eAAiB,EACf,MAAO,EAAE,CAAE,EACX,MAAO,EAAE,GAAI,EAEf,IAAK,EACH,QAAS,EAAE,EAAG,EAEhB,MAAQ,EACN,QAAS,EAAE,EAAG,EACd,UAAW,EAAE,AAAC,EACd,OAAQ,EAAE,OAAQ,EAClB,aAAc,EAAE,OAAQ,EAE1B,EAAG,EACD,EAAG,EAAE,KAAM,EAEb,EAAG,EACD,KAAM,EAAE,MAAO,EAEjB,OAAU,EACR,KAAM,EAAE,AAAC,EACT,MAAO,EAAE,AAAC,EACV,SAAU,EAAE,GAAI,EAChB,eAAgB,EAAE,GAAI,EAExB,CAAE,EACA,SAAU,EAAE,GAAI,EAElB,CAAE,EACA,KAAM,EAAE,AAAC,EAEX,EAAG,EACD,KAAM,EAAE,AAAC,EACT,qBAAsB,EAAE,MAAO,EAC/B,aAAc,EAAE,KAAM,EACtB,QAAS,EAAE,GAAI,EAEjB,aAAc,EACZ,OAAQ,EAAE,KAAM,EAElB,KAAM,EACJ,KAAM,EAAE,AAAC,EAEX,GAAI,EACF,KAAM,EAAE,AAAC,EAEX,OAAQ,EACN,KAAM,EAAE,AAAC,EACT,KAAM,EAAE,AAAC,EACT,MAAO,EAAE,AAAC,EAEZ,IAAK,EACH,KAAM,EAAE,MAAO,EAEjB,KAAM,EACJ,KAAM,EAAE,AAAC,EACT,WAAY,EAAE,GAAI,EAClB,MAAO,EAAE,AAAC,EACV,UAAW,EAAE,KAAM,EAErB,2BAA+B,EAC7B,QAAS,EAAE,GAAI,EACf,KAAM,EAAE,AAAC,EACT,aAAc,EAAE,OAAQ,EACxB,cAAe,EAAE,KAAM,EAEzB,WAAa,EACX,UAAW,EAAE,KAAM,EAErB,mEAAuE,EACrE,KAAM,EAAE,MAAO,EACf,iBAAkB,EAAE,KAAM,EAC1B,QAAS,EAAE,MAAO,EAEpB,+BAAiC,EAC/B,KAAM,EAAE,MAAO,EAEjB,yCAA2C,EACzC,SAAU,EAAE,SAAU,EACtB,MAAO,EAAE,AAAC,EACV,KAAM,EAAE,GAAI,EACZ,MAAO,EAAE,GAAI,EAEf,mBAA
oB,EAClB,iBAAkB,EAAE,QAAS,EAC7B,cAAe,EAAE,UAAW,EAC5B,iBAAkB,EAAE,UAAW,EAC/B,SAAU,EAAE,UAAW,EAEzB,iGAAmG,EACjG,iBAAkB,EAAE,GAAI,EAE1B,+CAAiD,EAC/C,KAAM,EAAE,AAAC,EACT,MAAO,EAAE,AAAC,EAEZ,OAAQ,EACN,OAAQ,EAAE,GAAI,EACd,aAAc,EAAE,EAAG,EACnB,KAAM,EAAE,OAAQ,EAElB,IAAK,EACH,cAAe,EAAE,OAAQ,EACzB,aAAc,EAAE,AAAC,EAEnB,CAAE,EACA,aAAc,EAAE,EAAG,EAErB,WAAY,EACV,KAAM,EAAE,MAAO,EACf,SAAU,EAAE,GAAI,EAChB,IAAK,EAAE,GAAK,EACZ,MAAO,EAAE,MAAO,EAElB,EAAG,EACD,MAAO,EAAE,IAAK,EACd,KAAM,EAAE,AAAC,EACT,UAAW,EAAE,KAAM,EACnB,OAAQ,EAAE,KAAM,EAChB,eAAgB,EAAE,UAAW,EAC7B,gBAAiB,EAAE,QAAS,EAC5B,SAAU,EAAE,GAAI,EAChB,QAAS,EAAE,EAAG,EACd,WAAY,EAAE,AAAC,EAEjB,KAAM,EACJ,MAAO,EAAE,GAAI,EAEf,MAAO,EACL,MAAO,EAAE,cAAe,EACxB,SAAU,EAAE,KAAM,EAEpB,cAAe,EACb,KAAM,EAAE,AAAC,EACT,GAAI,EAAE,YAAa,EACnB,KAAM,EAAE,EAAG,EACX,KAAM,EAAE,GAAI,EACZ,OAAQ,EAAE,KAAM,EAChB,MAAO,EAAE,AAAC,EACV,OAAQ,EAAE,OAAQ,EAClB,IAAK,EAAE,EAAG,EAEZ,+DAAiE,EAC/D,GAAI,EAAE,GAAI,EACV,KAAM,EAAE,GAAI,EACZ,KAAM,EAAE,AAAC,EACT,OAAQ,EAAE,MAAO,EACjB,OAAQ,EAAE,KAAM,EAChB,IAAK,EAAE,GAAI,EAEb,SAAU,EACR,SAAU,EAAE,KAAM,EAEpB,QAAS,EACP,OAAQ,EAAE,OAAQ,EAEpB,QAAU,EACR,QAAS,EAAE,GAAI,EAEjB,WAAY,EACV,gBAAmB,EACjB,SAAU,EAAE,cAAe,EAC7B,AAAC,EACC,SAAU,EAAE,cAAe,EAC3B,UAAW,EAAE,cAAe,EAC5B,KAAM,EAAE,cAAe,EACvB,SAAU,EAAE,cAAe,EAC7B,UAAY,EACV,cAAe,EAAE,QAAS,EAC5B,0DAA6D,EAC3D,MAAO,EAAE,CAAE,EACb,aAAe,EACb,gBAAiB,EAAE,IAAK,EAC1B,IAAK,EACH,MAAO,EAAE,iBAAkB,EAC7B,KAAO,EACL,gBAAiB,EAAE,IAAK,EAC1B,EAAG,EACD,QAAS,EAAE,cAAe,QAE1B,KAAM,EAAE,IAAK,EAEf,8CAAS,EACP,MAAO,EAAE,AAAC,EACV,KAAM,EAAE,AAAC,EACX,4CAAM,EACJ,eAAgB,EAAE,IAAK,GChM3B,ykDAAY,EACV,qBAAsB,EAAE,UAAW,EAqDrC,QAAS,EARP,IAAK,EAAE,AAAC,EACR,+BAAS,EAEP,MAAO,EAAE,IAAK,EACd,MAAO,EAAE,CAAE,EACb,cAAO,EACL,IAAK,EAAE,GAAI,EC7Gf;;;IAGG,DCAH,UAWC,CAVC,WAAW,CAAE,aAAa,CAC1B,GAAG,CAAE,+CAAgE,CACrE,GAAG,CAAE,wWAI8F,CAEnG,WAAW,CAAE,MAAM,CACnB,UAAU,CAAE,MAAM,CCVpB,kfAAmB,CACjB,OAAO,CAAE,YAAY,CACrB,IAAI,CAAE,uCAA8E,CACpF,SAAS,CAAE,OAAO,CAClB,cAAc,CAAE,IAAI,CACpB,sBAAsB,CAAE,WAAW,CACnC,uBAAuB,CAAE,SAAS,CCLpC,MAAsB,CACpB,SAAS,CAAE,SA
AS,CACpB,WAAW,CAAE,KAAS,CACtB,cAAc,CAAE,IAAI,CAEtB,MAAsB,CAAE,SAAS,CAAE,GAAG,CACtC,MAAsB,CAAE,SAAS,CAAE,GAAG,CACtC,MAAsB,CAAE,SAAS,CAAE,GAAG,CACtC,MAAsB,CAAE,SAAS,CAAE,GAAG,CCVtC,MAAsB,CACpB,KAAK,CAAE,SAAW,CAClB,UAAU,CAAE,MAAM,CCDpB,MAAsB,CACpB,YAAY,CAAE,CAAC,CACf,WAAW,CCMU,SAAS,CDL9B,eAAe,CAAE,IAAI,CACrB,SAAK,CAAE,QAAQ,CAAE,QAAQ,CAE3B,MAAsB,CACpB,QAAQ,CAAE,QAAQ,CAClB,IAAI,CAAE,UAAa,CACnB,KAAK,CCDgB,SAAS,CDE9B,GAAG,CAAE,QAAU,CACf,UAAU,CAAE,MAAM,CAClB,YAAuB,CACrB,IAAI,CAAE,UAA0B,CEbpC,UAA0B,CACxB,OAAO,CAAE,gBAAgB,CACzB,MAAM,CAAE,iBAA4B,CACpC,aAAa,CAAE,IAAI,CAGrB,aAA6B,CAAE,KAAK,CAAE,IAAI,CAC1C,cAA8B,CAAE,KAAK,CAAE,KAAK,CAG1C,ksBAA8B,CAAE,YAAY,CAAE,IAAI,CAClD,ktBAA+B,CAAE,WAAW,CAAE,IAAI,CAIpD,WAAY,CAAE,KAAK,CAAE,KAAK,CAC1B,UAAW,CAAE,KAAK,CAAE,IAAI,CAGtB,kpBAAY,CAAE,YAAY,CAAE,IAAI,CAChC,kqBAAa,CAAE,WAAW,CAAE,IAAI,CCpBlC,QAAwB,CACtB,iBAAiB,CAAE,0BAA0B,CACrC,SAAS,CAAE,0BAA0B,CAG/C,SAAyB,CACvB,iBAAiB,CAAE,4BAA4B,CACvC,SAAS,CAAE,4BAA4B,CAGjD,0BASC,CARC,EAAG,CACD,iBAAiB,CAAE,YAAY,CACvB,SAAS,CAAE,YAAY,CAEjC,IAAK,CACH,iBAAiB,CAAE,cAAc,CACzB,SAAS,CAAE,cAAc,EAIrC,kBASC,CARC,EAAG,CACD,iBAAiB,CAAE,YAAY,CACvB,SAAS,CAAE,YAAY,CAEjC,IAAK,CACH,iBAAiB,CAAE,cAAc,CACzB,SAAS,CAAE,cAAc,EC5BrC,aAA8B,CCW5B,UAAU,CAAE,0DAAqE,CACjF,iBAAiB,CAAE,aAAgB,CAC/B,aAAa,CAAE,aAAgB,CAC3B,SAAS,CAAE,aAAgB,CDbrC,cAA8B,CCU5B,UAAU,CAAE,0DAAqE,CACjF,iBAAiB,CAAE,cAAgB,CAC/B,aAAa,CAAE,cAAgB,CAC3B,SAAS,CAAE,cAAgB,CDZrC,cAA8B,CCS5B,UAAU,CAAE,0DAAqE,CACjF,iBAAiB,CAAE,cAAgB,CAC/B,aAAa,CAAE,cAAgB,CAC3B,SAAS,CAAE,cAAgB,CDVrC,mBAAmC,CCcjC,UAAU,CAAE,oEAA+E,CAC3F,iBAAiB,CAAE,YAAoB,CACnC,aAAa,CAAE,YAAoB,CAC/B,SAAS,CAAE,YAAoB,CDhBzC,iBAAmC,CCajC,UAAU,CAAE,oEAA+E,CAC3F,iBAAiB,CAAE,YAAoB,CACnC,aAAa,CAAE,YAAoB,CAC/B,SAAS,CAAE,YAAoB,CDXzC,+GAIuC,CACrC,MAAM,CAAE,IAAI,CEfd,SAAyB,CACvB,QAAQ,CAAE,QAAQ,CAClB,OAAO,CAAE,YAAY,CACrB,KAAK,CAAE,GAAG,CACV,MAAM,CAAE,GAAG,CACX,WAAW,CAAE,GAAG,CAChB,cAAc,CAAE,MAAM,CAExB,yBAAyD,CACvD,QAAQ,CAAE,QAAQ,CAClB,IAAI,CAAE,CAAC,CACP,KAAK,CAAE,IAAI,CACX,UAAU,CAAE,MAAM,CAEpB,YAA4B,CAAE,WAAW,CAAE,OAAO,CACl
D,YAA4B,CAAE,SAAS,CAAE,GAAG,CAC5C,WAA2B,CAAE,KAAK,CLTZ,IAAI,CMP1B,gBAAgC,CAAE,OAAO,CNwU1B,GAAO,CMvUtB,gBAAgC,CAAE,OAAO,CN2d1B,GAAO,CM1dtB,qCAAiC,CAAE,OAAO,CN0jB1B,GAAO,CMzjBvB,qBAAqC,CAAE,OAAO,CNsO1B,GAAO,CMrO3B,gBAAgC,CAAE,OAAO,CNuW1B,GAAO,CMtWtB,eAA+B,CAAE,OAAO,CNknB1B,GAAO,CMjnBrB,iBAAiC,CAAE,OAAO,CNsnB1B,GAAO,CMrnBvB,eAA+B,CAAE,OAAO,CNytB1B,GAAO,CMxtBrB,eAA+B,CAAE,OAAO,CNmR1B,GAAO,CMlRrB,mBAAmC,CAAE,OAAO,CNupB1B,GAAO,CMtpBzB,aAA6B,CAAE,OAAO,CNqpB1B,GAAO,CMppBnB,kBAAkC,CAAE,OAAO,CNspB1B,GAAO,CMrpBxB,gBAAgC,CAAE,OAAO,CNyI1B,GAAO,CMxItB,mDAEgC,CAAE,OAAO,CNqqB1B,GAAO,CMpqBtB,sBAAsC,CAAE,OAAO,CN8iB1B,GAAO,CM7iB5B,uBAAuC,CAAE,OAAO,CN4iB1B,GAAO,CM3iB7B,oBAAoC,CAAE,OAAO,CN4f1B,GAAO,CM3f1B,iBAAiC,CAAE,OAAO,CNikB1B,GAAO,CMhkBvB,8BAC8B,CAAE,OAAO,CNgK1B,GAAO,CM/JpB,kBAAkC,CAAE,OAAO,CN+qB1B,GAAO,CM9qBxB,iCAA+B,CAAE,OAAO,CNwV1B,GAAO,CMvVrB,iBAAiC,CAAE,OAAO,CNuP1B,GAAO,CMtPvB,kBAAkC,CAAE,OAAO,CNgJ1B,GAAO,CM/IxB,eAA+B,CAAE,OAAO,CNmhB1B,GAAO,CMlhBrB,uHAAmC,CAAE,OAAO,CNgM1B,GAAO,CM/LzB,8BAA8C,CAAE,OAAO,CNY1B,GAAO,CMXpC,4BAA4C,CAAE,OAAO,CNc1B,GAAO,CMblC,gBAAgC,CAAE,OAAO,CNqW1B,GAAO,CMpWtB,wBAAwC,CAAE,OAAO,CNwe1B,GAAO,CMve9B,yCACiC,CAAE,OAAO,CNsgB1B,GAAO,CMrgBvB,kBAAkC,CAAE,OAAO,CNggB1B,GAAO,CM/fxB,mBAAmC,CAAE,OAAO,CNwY1B,GAAO,CMvYzB,eAA+B,CAAE,OAAO,CN2Y1B,GAAO,CM1YrB,eAA+B,CAAE,OAAO,CN4P1B,GAAO,CM3PrB,qBAAqC,CAAE,OAAO,CNoU1B,GAAO,CMnU3B,qBAAqC,CAAE,OAAO,CNitB1B,GAAO,CMhtB3B,sBAAsC,CAAE,OAAO,CN+sB1B,GAAO,CM9sB5B,oBAAoC,CAAE,OAAO,CNgtB1B,GAAO,CM/sB1B,iBAAiC,CAAE,OAAO,CNye1B,GAAO,CMxevB,kBAAkC,CAAE,OAAO,CNwB1B,GAAO,CMvBxB,cAA8B,CAAE,OAAO,CNymB1B,GAAO,CMxmBpB,eAA+B,CAAE,OAAO,CNymB1B,GAAO,CMxmBrB,iCAA+B,CAAE,OAAO,CNyD1B,GAAO,CMxDrB,mBAAmC,CAAE,OAAO,CNyD1B,GAAO,CMxDzB,gBAAgC,CAAE,OAAO,CN+d1B,GAAO,CM9dtB,iBAAiC,CAAE,OAAO,CN2E1B,GAAO,CM1EvB,eAA+B,CAAE,OAAO,CN0P1B,GAAO,CMzPrB,eAA+B,CAAE,OAAO,CNiD1B,GAAO,CMhDrB,iBAAiC,CAAE,OAAO,CN0V1B,GAAO,CMzVvB,sBAAsC,CAAE,OAAO,CNwmB1B,GAAO,CMvmB5B,qBAAqC,CAAE,OAAO,CNwmB1B,GAAO,CMvmB3B,qBAAqC,CAAE,OAAO,CNpC1B,GAAO,CMqC3B,uBAAuC,CAAE,OAAO,CNvC1B,GAAO,CMwC7
B,sBAAsC,CAAE,OAAO,CNrC1B,GAAO,CMsC5B,wBAAwC,CAAE,OAAO,CNxC1B,GAAO,CMyC9B,eAA+B,CAAE,OAAO,CN+W1B,GAAO,CM9WrB,oCACkC,CAAE,OAAO,CN2a1B,GAAO,CM1axB,iBAAiC,CAAE,OAAO,CNsU1B,GAAO,CMrUvB,uBAAuC,CAAE,OAAO,CNkrB1B,GAAO,CMjrB7B,sDAEoC,CAAE,OAAO,CN0b1B,GAAO,CMzb1B,iBAAiC,CAAE,OAAO,CNkb1B,GAAO,CMjbvB,qBAAqC,CAAE,OAAO,CNwX1B,GAAO,CMvX3B,iBAAiC,CAAE,OAAO,CNtD1B,GAAO,CMuDvB,eAA+B,CAAE,OAAO,CNmnB1B,GAAO,CMlnBrB,0CAC0C,CAAE,OAAO,CN+a1B,GAAO,CM9ahC,yBAAyC,CAAE,OAAO,CN8f1B,GAAO,CM7f/B,yBAAyC,CAAE,OAAO,CN+E1B,GAAO,CM9E/B,iBAAiC,CAAE,OAAO,CNzB1B,GAAO,CM0BvB,wBAAwC,CAAE,OAAO,CNmjB1B,GAAO,CMljB9B,wBAAwC,CAAE,OAAO,CNqL1B,GAAO,CMpL9B,mBAAmC,CAAE,OAAO,CNlB1B,GAAO,CMmBzB,eAA+B,CAAE,OAAO,CNsb1B,GAAO,CMrbrB,gBAAgC,CAAE,OAAO,CNga1B,GAAO,CM/ZtB,eAA+B,CAAE,OAAO,CNmjB1B,GAAO,CMljBrB,kBAAkC,CAAE,OAAO,CN+N1B,GAAO,CM9NxB,uBAAuC,CAAE,OAAO,CNgL1B,GAAO,CM/K7B,uBAAuC,CAAE,OAAO,CN4iB1B,GAAO,CM3iB7B,gBAAgC,CAAE,OAAO,CN+I1B,GAAO,CM9ItB,uBAAuC,CAAE,OAAO,CNyE1B,GAAO,CMxE7B,wBAAwC,CAAE,OAAO,CNyE1B,GAAO,CMxE9B,sBAAsC,CAAE,OAAO,CNkb1B,GAAO,CMjb5B,uBAAuC,CAAE,OAAO,CNuX1B,GAAO,CMtX7B,8FAAuC,CAAE,OAAO,CN2lB1B,GAAO,CM1lB7B,+FAAuC,CAAE,OAAO,CN2D1B,GAAO,CM1D7B,0BAA0C,CAAE,OAAO,CNyb1B,GAAO,CMxbhC,sBAAsC,CAAE,OAAO,CN0S1B,GAAO,CMzS5B,qBAAqC,CAAE,OAAO,CN0G1B,GAAO,CMzG3B,yBAAyC,CAAE,OAAO,CNulB1B,GAAO,CMtlB/B,yBAAyC,CAAE,OAAO,CNuD1B,GAAO,CMtD/B,cAA8B,CAAE,OAAO,CNnC1B,GAAO,CMoCpB,qBAAqC,CAAE,OAAO,CNnD1B,GAAO,CMoD3B,sBAAsC,CAAE,OAAO,CNnD1B,GAAO,CMoD5B,mBAAmC,CAAE,OAAO,CNnD1B,GAAO,CMoDzB,qBAAqC,CAAE,OAAO,CNvD1B,GAAO,CMwD3B,wCACgC,CAAE,OAAO,CN4d1B,GAAO,CM3dtB,iBAAiC,CAAE,OAAO,CN8I1B,GAAO,CM7IvB,mBAAmC,CAAE,OAAO,CNsF1B,GAAO,CMrFzB,eAA+B,CAAE,OAAO,CN+Z1B,GAAO,CM9ZrB,gBAAgC,CAAE,OAAO,CNoW1B,GAAO,CMnWtB,mBAAmC,CAAE,OAAO,CNpD1B,GAAO,CMqDzB,gNAA6C,CAAE,OAAO,CNuI1B,GAAO,CMtInC,eAA+B,CAAE,OAAO,CNkN1B,GAAO,CMjNrB,eAA+B,CAAE,OAAO,CN0S1B,GAAO,CMzSrB,iCAA+B,CAAE,OAAO,CN6K1B,GAAO,CM5KrB,cAA8B,CAAE,OAAO,CNyI1B,GAAO,CMxIpB,oBAAoC,CAAE,OAAO,CNyI1B,GAAO,CMxI1B,kDAC+C,CAAE,OAAO,CNiI1B,GAAO,CMhIrC,gBAAgC,CAAE,OAAO,CN+Y1B,GAAO,CM9YtB,mBAAmC,CAAE,O
AAO,CNA1B,GAAO,CMCzB,iBAAiC,CAAE,OAAO,CNoa1B,GAAO,CMnavB,kBAAkC,CAAE,OAAO,CNgE1B,GAAO,CM/DxB,iBAAiC,CAAE,OAAO,CN6T1B,GAAO,CM5TvB,qBAAqC,CAAE,OAAO,CNuC1B,GAAO,CMtC3B,uBAAuC,CAAE,OAAO,CNmC1B,GAAO,CMlC7B,kBAAkC,CAAE,OAAO,CN+a1B,GAAO,CM9axB,wBAAwC,CAAE,OAAO,CNkd1B,GAAO,CMjd9B,iBAAiC,CAAE,OAAO,CN0K1B,GAAO,CMzKvB,sBAAsC,CAAE,OAAO,CN2K1B,GAAO,CM1K5B,mBAAmC,CAAE,OAAO,CN3E1B,GAAO,CM4EzB,mBAAmC,CAAE,OAAO,CN7E1B,GAAO,CM8EzB,2CACoC,CAAE,OAAO,CNlE1B,GAAO,CMmE1B,yBAAyC,CAAE,OAAO,CN+kB1B,GAAO,CM9kB/B,0BAA0C,CAAE,OAAO,CN4H1B,GAAO,CM3HhC,uBAAuC,CAAE,OAAO,CNT1B,GAAO,CMU7B,cAA8B,CAAE,OAAO,CN2Q1B,GAAO,CM1QpB,gCAC+B,CAAE,OAAO,CN6C1B,GAAO,CM5CrB,mBAAmC,CAAE,OAAO,CNkD1B,GAAO,CMjDzB,sBAAsC,CAAE,OAAO,CNsiB1B,GAAO,CMriB5B,wBAAwC,CAAE,OAAO,CNoiB1B,GAAO,CMniB9B,oBAAoC,CAAE,OAAO,CN2e1B,GAAO,CM1e1B,kBAAkC,CAAE,OAAO,CN8N1B,GAAO,CM7NxB,mBAAmC,CAAE,OAAO,CNoc1B,GAAO,CMnczB,0BAA0C,CAAE,OAAO,CNuR1B,GAAO,CMtRhC,qBAAqC,CAAE,OAAO,CN6hB1B,GAAO,CM5hB3B,wBAAwC,CAAE,OAAO,CNsG1B,GAAO,CMrG9B,kBAAkC,CAAE,OAAO,CN8b1B,GAAO,CM7bxB,iBAAiC,CAAE,OAAO,CNqjB1B,GAAO,CMpjBvB,wBAAwC,CAAE,OAAO,CNgL1B,GAAO,CM/K9B,iBAAiC,CAAE,OAAO,CNukB1B,GAAO,CMtkBvB,kBAAkC,CAAE,OAAO,CNqQ1B,GAAO,CMpQxB,gBAAgC,CAAE,OAAO,CNiW1B,GAAO,CMhWtB,mBAAmC,CAAE,OAAO,CN2d1B,GAAO,CM1dzB,qBAAqC,CAAE,OAAO,CNjD1B,GAAO,CMkD3B,uBAAuC,CAAE,OAAO,CN+V1B,GAAO,CM9V7B,kBAAkC,CAAE,OAAO,CNsjB1B,GAAO,CMrjBxB,yCACmC,CAAE,OAAO,CNgG1B,GAAO,CM/FzB,qCAAiC,CAAE,OAAO,CNoK1B,GAAO,CMnKvB,iBAAiC,CAAE,OAAO,CN0jB1B,GAAO,CMzjBvB,sBAAsC,CAAE,OAAO,CNoC1B,GAAO,CMnC5B,8BAC8B,CAAE,OAAO,CN+Y1B,GAAO,CM9YpB,gBAAgC,CAAE,OAAO,CNoM1B,GAAO,CMnMtB,mBAAmC,CAAE,OAAO,CNrD1B,GAAO,CMsDzB,eAA+B,CAAE,OAAO,CNhF1B,GAAO,CMiFrB,sBAAsC,CAAE,OAAO,CNrB1B,GAAO,CMsB5B,uBAAuC,CAAE,OAAO,CNoL1B,GAAO,CMnL7B,sBAAsC,CAAE,OAAO,CNkL1B,GAAO,CMjL5B,oBAAoC,CAAE,OAAO,CNmL1B,GAAO,CMlL1B,sBAAsC,CAAE,OAAO,CN+K1B,GAAO,CM9K5B,2DAA4C,CAAE,OAAO,CNrI1B,GAAO,CMsIlC,6DAA6C,CAAE,OAAO,CNjI1B,GAAO,CMkInC,0BAA0C,CAAE,OAAO,CNjI1B,GAAO,CMkIhC,4BAA4C,CAAE,OAAO,CNzI1B,GAAO,CM0IlC,gBAAgC,CAAE,OAAO,CN2J1B,GAAO,CM1JtB,iBAAiC,CAAE,OAAO,CN6lB1
B,GAAO,CM5lBvB,gBAAgC,CAAE,OAAO,CNqe1B,GAAO,CMpetB,iBAAiC,CAAE,OAAO,CNyG1B,GAAO,CMxGvB,oBAAoC,CAAE,OAAO,CNzE1B,GAAO,CM0E1B,qBAAqC,CAAE,OAAO,CNlI1B,GAAO,CMmI3B,iCACgC,CAAE,OAAO,CNijB1B,GAAO,CMhjBtB,kDAC+B,CAAE,OAAO,CN4O1B,GAAO,CM3OrB,gBAAgC,CAAE,OAAO,CNd1B,GAAO,CMetB,gBAAgC,CAAE,OAAO,CN0G1B,GAAO,CMzGtB,kCACmC,CAAE,OAAO,CN6X1B,GAAO,CM5XzB,kCACkC,CAAE,OAAO,CN2F1B,GAAO,CM1FxB,oBAAoC,CAAE,OAAO,CN6S1B,GAAO,CM5S1B,mCACmC,CAAE,OAAO,CNqG1B,GAAO,CMpGzB,iBAAiC,CAAE,OAAO,CNgb1B,GAAO,CM/avB,qDAE+B,CAAE,OAAO,CNlI1B,GAAO,CMmIrB,kBAAkC,CAAE,OAAO,CNsO1B,GAAO,CMrOxB,kBAAkC,CAAE,OAAO,CNoO1B,GAAO,CMnOxB,wBAAwC,CAAE,OAAO,CN+b1B,GAAO,CM9b9B,oBAAoC,CAAE,OAAO,CN2gB1B,GAAO,CM1gB1B,gBAAgC,CAAE,OAAO,CNuc1B,GAAO,CMtctB,gBAAgC,CAAE,OAAO,CNyO1B,GAAO,CMxOtB,gBAAgC,CAAE,OAAO,CN6f1B,GAAO,CM5ftB,oBAAoC,CAAE,OAAO,CNmT1B,GAAO,CMlT1B,2BAA2C,CAAE,OAAO,CNoT1B,GAAO,CMnTjC,6BAA6C,CAAE,OAAO,CNgI1B,GAAO,CM/HnC,sBAAsC,CAAE,OAAO,CN4H1B,GAAO,CM3H5B,gBAAgC,CAAE,OAAO,CNqQ1B,GAAO,CMpQtB,wEAAqC,CAAE,OAAO,CNpF1B,GAAO,CMqF3B,mBAAmC,CAAE,OAAO,CN9E1B,GAAO,CM+EzB,qBAAqC,CAAE,OAAO,CNrF1B,GAAO,CMsF3B,sBAAsC,CAAE,OAAO,CNrF1B,GAAO,CMsF5B,kBAAkC,CAAE,OAAO,CNhC1B,GAAO,CMiCxB,mCAC+B,CAAE,OAAO,CN0Y1B,GAAO,CMzYrB,yCACoC,CAAE,OAAO,CN8Y1B,GAAO,CM7Y1B,sCACmC,CAAE,OAAO,CN2Y1B,GAAO,CM1YzB,mBAAmC,CAAE,OAAO,CNU1B,GAAO,CMTzB,mBAAmC,CAAE,OAAO,CNuM1B,GAAO,CMtMzB,sCAC+B,CAAE,OAAO,CNqf1B,GAAO,CMpfrB,iCACgC,CAAE,OAAO,CNoF1B,GAAO,CMnFtB,0CACqC,CAAE,OAAO,CN+a1B,GAAO,CM9a3B,oBAAoC,CAAE,OAAO,CN7C1B,GAAO,CM8C1B,qBAAqC,CAAE,OAAO,CN1C1B,GAAO,CM2C3B,gCAC+B,CAAE,OAAO,CNpI1B,GAAO,CMqIrB,kBAAkC,CAAE,OAAO,CN6W1B,GAAO,CM5WxB,mBAAmC,CAAE,OAAO,CNye1B,GAAO,CMxezB,qCACoC,CAAE,OAAO,CNrE1B,GAAO,CMsE1B,sBAAsC,CAAE,OAAO,CNqL1B,GAAO,CMpL5B,mBAAmC,CAAE,OAAO,CNG1B,GAAO,CMFzB,yBAAyC,CAAE,OAAO,CNnE1B,GAAO,CMoE/B,uBAAuC,CAAE,OAAO,CNnE1B,GAAO,CMoE7B,kBAAkC,CAAE,OAAO,CNif1B,GAAO,CMhfxB,sBAAsC,CAAE,OAAO,CN8Y1B,GAAO,CM7Y5B,mBAAmC,CAAE,OAAO,CNyZ1B,GAAO,CMxZzB,iBAAiC,CAAE,OAAO,CN9J1B,GAAO,CM+JvB,iBAAiC,CAAE,OAAO,CNlE1B,GAAO,CMmEvB,kBAAkC,CAAE,OAAO,CN1C1B,GAAO,CM2CxB,sBAAsC,
CAAE,OAAO,CN8B1B,GAAO,CM7B5B,qBAAqC,CAAE,OAAO,CN1I1B,GAAO,CM2I3B,qBAAqC,CAAE,OAAO,CNsH1B,GAAO,CMrH3B,oBAAoC,CAAE,OAAO,CNrO1B,GAAO,CMsO1B,iBAAiC,CAAE,OAAO,CN4M1B,GAAO,CM3MvB,sBAAsC,CAAE,OAAO,CNU1B,GAAO,CMT5B,eAA+B,CAAE,OAAO,CN3K1B,GAAO,CM4KrB,mBAAmC,CAAE,OAAO,CNuF1B,GAAO,CMtFzB,sBAAsC,CAAE,OAAO,CN2Q1B,GAAO,CM1Q5B,4BAA4C,CAAE,OAAO,CNrO1B,GAAO,CMsOlC,6BAA6C,CAAE,OAAO,CNrO1B,GAAO,CMsOnC,0BAA0C,CAAE,OAAO,CNrO1B,GAAO,CMsOhC,4BAA4C,CAAE,OAAO,CNzO1B,GAAO,CM0OlC,qBAAqC,CAAE,OAAO,CNrO1B,GAAO,CMsO3B,sBAAsC,CAAE,OAAO,CNrO1B,GAAO,CMsO5B,mBAAmC,CAAE,OAAO,CNrO1B,GAAO,CMsOzB,qBAAqC,CAAE,OAAO,CNzO1B,GAAO,CM0O3B,kBAAkC,CAAE,OAAO,CNpD1B,GAAO,CMqDxB,iBAAiC,CAAE,OAAO,CN4I1B,GAAO,CM3IvB,iBAAiC,CAAE,OAAO,CNwY1B,GAAO,CMvYvB,yCACiC,CAAE,OAAO,CNuM1B,GAAO,CMtMvB,mBAAmC,CAAE,OAAO,CNzG1B,GAAO,CM0GzB,qBAAqC,CAAE,OAAO,CNyQ1B,GAAO,CMxQ3B,sBAAsC,CAAE,OAAO,CNyQ1B,GAAO,CMxQ5B,kBAAkC,CAAE,OAAO,CN+V1B,GAAO,CM9VxB,iBAAiC,CAAE,OAAO,CN9G1B,GAAO,CM+GvB,sCACgC,CAAE,OAAO,CNoR1B,GAAO,CMnRtB,qBAAqC,CAAE,OAAO,CN+C1B,GAAO,CM9C3B,mBAAmC,CAAE,OAAO,CNmB1B,GAAO,CMlBzB,wBAAwC,CAAE,OAAO,CNoB1B,GAAO,CMnB9B,kBAAkC,CAAE,OAAO,CNqU1B,GAAO,CMpUxB,kBAAkC,CAAE,OAAO,CN2B1B,GAAO,CM1BxB,gBAAgC,CAAE,OAAO,CNgL1B,GAAO,CM/KtB,kBAAkC,CAAE,OAAO,CN2B1B,GAAO,CM1BxB,qBAAqC,CAAE,OAAO,CNuH1B,GAAO,CMtH3B,iBAAiC,CAAE,OAAO,CNM1B,GAAO,CMLvB,yBAAyC,CAAE,OAAO,CNI1B,GAAO,CMH/B,mBAAmC,CAAE,OAAO,CN6X1B,GAAO,CM5XzB,eAA+B,CAAE,OAAO,CNhH1B,GAAO,CMiHrB,8CACoC,CAAE,OAAO,CNuQ1B,GAAO,CMtQ1B,2EAEsC,CAAE,OAAO,CNsV1B,GAAO,CMrV5B,yBAAyC,CAAE,OAAO,CNwI1B,GAAO,CMvI/B,eAA+B,CAAE,OAAO,CNhG1B,GAAO,CMiGrB,oBAAoC,CAAE,OAAO,CNvH1B,GAAO,CMwH1B,yCACuC,CAAE,OAAO,CNtJ1B,GAAO,CMuJ7B,mBAAmC,CAAE,OAAO,CNyO1B,GAAO,CMxOzB,eAA+B,CAAE,OAAO,CN0F1B,GAAO,CMzFrB,sBAAsC,CAAE,OAAO,CN1D1B,GAAO,CM2D5B,sBAAsC,CAAE,OAAO,CNkW1B,GAAO,CMjW5B,oBAAoC,CAAE,OAAO,CN4V1B,GAAO,CM3V1B,iBAAiC,CAAE,OAAO,CNlE1B,GAAO,CMmEvB,uBAAuC,CAAE,OAAO,CNgO1B,GAAO,CM/N7B,qBAAqC,CAAE,OAAO,CN2J1B,GAAO,CM1J3B,2BAA2C,CAAE,OAAO,CN2J1B,GAAO,CM1JjC,iBAAiC,CAAE,OAAO,CNsR1B,GAAO,CMrRvB,qBAAqC,CAAE,OAAO,CN5L1B,GAAO,CM6L3B,4
BAA4C,CAAE,OAAO,CNxB1B,GAAO,CMyBlC,iBAAiC,CAAE,OAAO,CNuP1B,GAAO,CMtPvB,iBAAiC,CAAE,OAAO,CN6I1B,GAAO,CM5IvB,8BAA8C,CAAE,OAAO,CN9J1B,GAAO,CM+JpC,+BAA+C,CAAE,OAAO,CN9J1B,GAAO,CM+JrC,4BAA4C,CAAE,OAAO,CN9J1B,GAAO,CM+JlC,8BAA8C,CAAE,OAAO,CNlK1B,GAAO,CMmKpC,gBAAgC,CAAE,OAAO,CN8D1B,GAAO,CM7DtB,eAA+B,CAAE,OAAO,CNrH1B,GAAO,CMsHrB,iBAAiC,CAAE,OAAO,CNvS1B,GAAO,CMwSvB,qBAAqC,CAAE,OAAO,CN2Z1B,GAAO,CM1Z3B,mBAAmC,CAAE,OAAO,CNhN1B,GAAO,CMiNzB,qBAAqC,CAAE,OAAO,CN7F1B,GAAO,CM8F3B,qBAAqC,CAAE,OAAO,CN7F1B,GAAO,CM8F3B,qBAAqC,CAAE,OAAO,CN+O1B,GAAO,CM9O3B,sBAAsC,CAAE,OAAO,CNiM1B,GAAO,CMhM5B,iBAAiC,CAAE,OAAO,CN6W1B,GAAO,CM5WvB,uBAAuC,CAAE,OAAO,CN0I1B,GAAO,CMzI7B,wIAAyC,CAAE,OAAO,CN0I1B,GAAO,CMzI/B,mBAAmC,CAAE,OAAO,CNqF1B,GAAO,CMpFzB,qBAAqC,CAAE,OAAO,CNmF1B,GAAO,CMlF3B,uBAAuC,CAAE,OAAO,CNnL1B,GAAO,CMoL7B,wBAAwC,CAAE,OAAO,CN0K1B,GAAO,CMzK9B,+BAA+C,CAAE,OAAO,CNpF1B,GAAO,CMqFrC,uBAAuC,CAAE,OAAO,CNwP1B,GAAO,CMvP7B,kBAAkC,CAAE,OAAO,CNjJ1B,GAAO,CMkJxB,qDAC8C,CAAE,OAAO,CN/M1B,GAAO,CMgNpC,iDAC4C,CAAE,OAAO,CN9M1B,GAAO,CM+MlC,uDAC+C,CAAE,OAAO,CNjN1B,GAAO,CMkNrC,8BAC8B,CAAE,OAAO,CNvG1B,GAAO,CMwGpB,cAA8B,CAAE,OAAO,CNhC1B,GAAO,CMiCpB,gCAC8B,CAAE,OAAO,CNqY1B,GAAO,CMpYpB,+BAC8B,CAAE,OAAO,CN4C1B,GAAO,CM3CpB,2DAG8B,CAAE,OAAO,CNgD1B,GAAO,CM/CpB,iDAE8B,CAAE,OAAO,CNiN1B,GAAO,CMhNpB,6BAC8B,CAAE,OAAO,CN+C1B,GAAO,CM9CpB,iCAC8B,CAAE,OAAO,CN3P1B,GAAO,CM4PpB,eAA+B,CAAE,OAAO,CNhG1B,GAAO,CMiGrB,oBAAoC,CAAE,OAAO,CNpF1B,GAAO,CMqF1B,yBAAyC,CAAE,OAAO,CN0P1B,GAAO,CMzP/B,0BAA0C,CAAE,OAAO,CN0P1B,GAAO,CMzPhC,0BAA0C,CAAE,OAAO,CN0P1B,GAAO,CMzPhC,2BAA2C,CAAE,OAAO,CN0P1B,GAAO,CMzPjC,2BAA2C,CAAE,OAAO,CN6P1B,GAAO,CM5PjC,4BAA4C,CAAE,OAAO,CN6P1B,GAAO,CM5PlC,oBAAoC,CAAE,OAAO,CNkU1B,GAAO,CMjU1B,sBAAsC,CAAE,OAAO,CN8T1B,GAAO,CM7T5B,yBAAyC,CAAE,OAAO,CNya1B,GAAO,CMxa/B,kBAAkC,CAAE,OAAO,CNsa1B,GAAO,CMraxB,eAA+B,CAAE,OAAO,CN2Z1B,GAAO,CM1ZrB,sBAAsC,CAAE,OAAO,CN2Z1B,GAAO,CM1Z5B,uBAAuC,CAAE,OAAO,CNoa1B,GAAO,CMna7B,kBAAkC,CAAE,OAAO,CNxJ1B,GAAO,CMyJxB,yBAAyC,CAAE,OAAO,CN8P1B,GAAO,CM7P/B,oBAAoC,CAAE,OAAO,CNgB1B,GAAO,CMf1B,iBAAiC,CAAE,OAAO,CNpF1B,GA
AO,CMqFvB,cAA8B,CAAE,OAAO,CN3W1B,GAAO,CM4WpB,2CAAoC,CAAE,OAAO,CN/R1B,GAAO,CMgS1B,2BAA2C,CAAE,OAAO,CN/R1B,GAAO,CMgSjC,iBAAiC,CAAE,OAAO,CN+U1B,GAAO,CM9UvB,wBAAwC,CAAE,OAAO,CN+U1B,GAAO,CM9U9B,0BAA0C,CAAE,OAAO,CNgD1B,GAAO,CM/ChC,wBAAwC,CAAE,OAAO,CNkD1B,GAAO,CMjD9B,0BAA0C,CAAE,OAAO,CN+C1B,GAAO,CM9ChC,2BAA2C,CAAE,OAAO,CN+C1B,GAAO,CM9CjC,gBAAgC,CAAE,OAAO,CNjW1B,GAAO,CMkWtB,kBAAkC,CAAE,OAAO,CNmY1B,GAAO,CMlYxB,kBAAkC,CAAE,OAAO,CN7W1B,GAAO,CM8WxB,gBAAgC,CAAE,OAAO,CNkC1B,GAAO,CMjCtB,mBAAmC,CAAE,OAAO,CN5K1B,GAAO,CM6KzB,gBAAgC,CAAE,OAAO,CNgN1B,GAAO,CM/MtB,qBAAqC,CAAE,OAAO,CNxF1B,GAAO,CMyF3B,iBAAiC,CAAE,OAAO,CN4T1B,GAAO,CM3TvB,iBAAiC,CAAE,OAAO,CNtI1B,GAAO,CMuIvB,eAA+B,CAAE,OAAO,CN6C1B,GAAO,CM5CrB,qCACmC,CAAE,OAAO,CN5D1B,GAAO,CM6DzB,gBAAgC,CAAE,OAAO,CN8P1B,GAAO,CM7PtB,iBAAiC,CAAE,OAAO,CNuE1B,GAAO,CMtEvB,kBAAkC,CAAE,OAAO,CN9W1B,GAAO,CM+WxB,cAA8B,CAAE,OAAO,CNtS1B,GAAO,CMuSpB,aAA6B,CAAE,OAAO,CNiW1B,GAAO,CMhWnB,gBAAgC,CAAE,OAAO,CNuW1B,GAAO,CMtWtB,iBAAiC,CAAE,OAAO,CN+I1B,GAAO,CM9IvB,oBAAoC,CAAE,OAAO,CNkF1B,GAAO,CMjF1B,yBAAyC,CAAE,OAAO,CN6N1B,GAAO,CM5N/B,+BAA+C,CAAE,OAAO,CN/W1B,GAAO,CMgXrC,8BAA8C,CAAE,OAAO,CNjX1B,GAAO,CMkXpC,qDAC8C,CAAE,OAAO,CNzR1B,GAAO,CM0RpC,uBAAuC,CAAE,OAAO,CNnM1B,GAAO,CMoM7B,qBAAqC,CAAE,OAAO,CNiW1B,GAAO,CMhW3B,uBAAuC,CAAE,OAAO,CNoV1B,GAAO,CMnV7B,sCAC8B,CAAE,OAAO,CN0S1B,GAAO,CMzSpB,wEAAwC,CAAE,OAAO,CN0G1B,GAAO,CMzG9B,wBAAwC,CAAE,OAAO,CN4M1B,GAAO,CM3M9B,gBAAgC,CAAE,OAAO,CNsL1B,GAAO,CMrLtB,0BAA0C,CAAE,OAAO,CNzL1B,GAAO,CM0LhC,oBAAoC,CAAE,OAAO,CNoW1B,GAAO,CMnW1B,iBAAiC,CAAE,OAAO,CN8D1B,GAAO,CM7DvB,4DAEqC,CAAE,OAAO,CN8S1B,GAAO,CM7S3B,iDACyC,CAAE,OAAO,CN1F1B,GAAO,CM2F/B,gBAAgC,CAAE,OAAO,CNsW1B,GAAO,CMrWtB,iBAAiC,CAAE,OAAO,CNlG1B,GAAO,CMmGvB,iBAAiC,CAAE,OAAO,CNgH1B,GAAO,CM/GvB,wBAAwC,CAAE,OAAO,CNiH1B,GAAO,CMhH9B,6BAA6C,CAAE,OAAO,CNyN1B,GAAO,CMxNnC,sBAAsC,CAAE,OAAO,CNuN1B,GAAO,CMtN5B,oBAAoC,CAAE,OAAO,CN/N1B,GAAO,CMgO1B,eAA+B,CAAE,OAAO,CN5N1B,GAAO,CM6NrB,wBAAwC,CAAE,OAAO,CN2E1B,GAAO,CM1E9B,yBAAyC,CAAE,OAAO,CNyE1B,GAAO,CMxE/B,iBAAiC,CAAE,OAAO,CNvN1B,GAAO,CMwNvB,iBAAiC,CAAE,OAA
O,CNzC1B,GAAO,CM0CvB,mBAAmC,CAAE,OAAO,CNpC1B,GAAO,CMqCzB,cAA8B,CAAE,OAAO,CNtL1B,GAAO,CMuLpB,mBAAmC,CAAE,OAAO,CN7U1B,GAAO,CM8UzB,gBAAgC,CAAE,OAAO,CN1R1B,GAAO,CM2RtB,cAA8B,CAAE,OAAO,CNsD1B,GAAO,CMrDpB,gBAAgC,CAAE,OAAO,CNmL1B,GAAO,CMlLtB,eAA+B,CAAE,OAAO,CNrP1B,GAAO,CMsPrB,gBAAgC,CAAE,OAAO,CNrP1B,GAAO,CMsPtB,kBAAkC,CAAE,OAAO,CN7W1B,GAAO,CM8WxB,yBAAyC,CAAE,OAAO,CN7W1B,GAAO,CM8W/B,gBAAgC,CAAE,OAAO,CN0L1B,GAAO,CMzLtB,uBAAuC,CAAE,OAAO,CN0L1B,GAAO,CMzL7B,kBAAkC,CAAE,OAAO,CNyF1B,GAAO,CMxFxB,oCAC8B,CAAE,OAAO,CNzU1B,GAAO,CM0UpB,8BAC+B,CAAE,OAAO,CN+M1B,GAAO,CM9MrB,eAA+B,CAAE,OAAO,CN4P1B,GAAO,CM3PrB,kBAAkC,CAAE,OAAO,CNuK1B,GAAO,CMtKxB,qBAAqC,CAAE,OAAO,CNtP1B,GAAO,CMuP3B,qBAAqC,CAAE,OAAO,CNiK1B,GAAO,CMhK3B,mBAAmC,CAAE,OAAO,CN9P1B,GAAO,CM+PzB,qBAAqC,CAAE,OAAO,CN/L1B,GAAO,CMgM3B,sBAAsC,CAAE,OAAO,CNxL1B,GAAO,CMyL5B,uBAAuC,CAAE,OAAO,CNrM1B,GAAO,CMsM7B,4BAA4C,CAAE,OAAO,CN/L1B,GAAO,CMgMlC,yEAEuC,CAAE,OAAO,CNxM1B,GAAO,CMyM7B,+CACyC,CAAE,OAAO,CN9M1B,GAAO,CM+M/B,+CACuC,CAAE,OAAO,CN/M1B,GAAO,CMgN7B,+CACuC,CAAE,OAAO,CNpM1B,GAAO,CMqM7B,sBAAsC,CAAE,OAAO,CNjN1B,GAAO,CMkN5B,eAA+B,CAAE,OAAO,CNuR1B,GAAO,CMtRrB,kBAAkC,CAAE,OAAO,CN5S1B,GAAO,CM6SxB,mBAAmC,CAAE,OAAO,CN9E1B,GAAO,CM+EzB,uGAIoC,CAAE,OAAO,CNnE1B,GAAO,CMoE1B,yBAAyC,CAAE,OAAO,CN/T1B,GAAO,CMgU/B,oDAEgC,CAAE,OAAO,CNqD1B,GAAO,CMpDtB,+BACiC,CAAE,OAAO,CNnQ1B,GAAO,CMoQvB,qBAAqC,CAAE,OAAO,CNzK1B,GAAO,CM0K3B,cAA8B,CAAE,OAAO,CN3K1B,GAAO,CM4KpB,0EAEsC,CAAE,OAAO,CNxJ1B,GAAO,CMyJ5B,wBAAwC,CAAE,OAAO,CN2K1B,GAAO,CM1K9B,aAA6B,CAAE,OAAO,CNiC1B,GAAO,CMhCnB,mCACiC,CAAE,OAAO,CN0Q1B,GAAO,CMzQvB,sCACsC,CAAE,OAAO,CNV1B,GAAO,CMW5B,0CACwC,CAAE,OAAO,CNX1B,GAAO,CMY9B,kBAAkC,CAAE,OAAO,CN1I1B,GAAO,CM2IxB,sBAAsC,CAAE,OAAO,CNlV1B,GAAO,CMmV5B,iBAAiC,CAAE,OAAO,CNjJ1B,GAAO,CMkJvB,oBAAoC,CAAE,OAAO,CNb1B,GAAO,CMc1B,kBAAkC,CAAE,OAAO,CN+F1B,GAAO,CM9FxB,oBAAoC,CAAE,OAAO,CNuE1B,GAAO,CMtE1B,2BAA2C,CAAE,OAAO,CNuE1B,GAAO,CMtEjC,eAA+B,CAAE,OAAO,CNzZ1B,GAAO,CM0ZrB,4CACmC,CAAE,OAAO,CN5M1B,GAAO,CM6MzB,cAA8B,CAAE,OAAO,CN0M1B,GAAO,CMzMpB,qBAAqC,CAAE,OAAO,CNxa1B,GAAO,CMya3B,eAA+B,CAAE,OAA
O,CNI1B,GAAO,CMHrB,qBAAqC,CAAE,OAAO,CNuF1B,GAAO,CMtF3B,iBAAiC,CAAE,OAAO,CN2M1B,GAAO,CM1MvB,eAA+B,CAAE,OAAO,CN+Q1B,GAAO,CM9QrB,sBAAsC,CAAE,OAAO,CNzC1B,GAAO,CM0C5B,eAA+B,CAAE,OAAO,CNwP1B,GAAO,CMvPrB,qBAAqC,CAAE,OAAO,CNrZ1B,GAAO,CMsZ3B,iBAAiC,CAAE,OAAO,CNvB1B,GAAO,CMwBvB,wBAAwC,CAAE,OAAO,CN3L1B,GAAO,CM4L9B,kBAAkC,CAAE,OAAO,CN5X1B,GAAO,CM6XxB,wBAAwC,CAAE,OAAO,CNhY1B,GAAO,CMiY9B,sBAAsC,CAAE,OAAO,CNnY1B,GAAO,CMoY5B,kBAAkC,CAAE,OAAO,CNtY1B,GAAO,CMuYxB,oBAAoC,CAAE,OAAO,CNlY1B,GAAO,CMmY1B,oBAAoC,CAAE,OAAO,CNlY1B,GAAO,CMmY1B,qBAAqC,CAAE,OAAO,CN3b1B,GAAO,CM4b3B,uBAAuC,CAAE,OAAO,CN3b1B,GAAO,CM4b7B,gBAAgC,CAAE,OAAO,CN+K1B,GAAO,CM9KtB,oBAAoC,CAAE,OAAO,CNnV1B,GAAO,CMoV1B,aAA6B,CAAE,OAAO,CN9d1B,GAAO,CM+dnB,qBAAqC,CAAE,OAAO,CN5R1B,GAAO,CM6R3B,sBAAsC,CAAE,OAAO,CN/C1B,GAAO,CMgD5B,wBAAwC,CAAE,OAAO,CN9b1B,GAAO,CM+b9B,qBAAqC,CAAE,OAAO,CNtf1B,GAAO,CMuf3B,oBAAoC,CAAE,OAAO,CN/B1B,GAAO,CMgC1B,qBAAqC,CAAE,OAAO,CNzH1B,GAAO,CM0H3B,iBAAiC,CAAE,OAAO,CNvI1B,GAAO,CMwIvB,wBAAwC,CAAE,OAAO,CNvI1B,GAAO,CMwI9B,qBAAqC,CAAE,OAAO,CN4J1B,GAAO,CM3J3B,oBAAoC,CAAE,OAAO,CN4J1B,GAAO,CM3J1B,kBAAkC,CAAE,OAAO,CNxc1B,GAAO,CMycxB,cAA8B,CAAE,OAAO,CNjb1B,GAAO,CMkbpB,kBAAkC,CAAE,OAAO,CNvJ1B,GAAO,CMwJxB,oBAAoC,CAAE,OAAO,CN3gB1B,GAAO,CM4gB1B,aAA6B,CAAE,OAAO,CN7Z1B,GAAO,CM8ZnB,kDAE8B,CAAE,OAAO,CNzK1B,GAAO,CM0KpB,mBAAmC,CAAE,OAAO,CNpG1B,GAAO,CMqGzB,qBAAqC,CAAE,OAAO,CNxb1B,GAAO,CMyb3B,yBAAyC,CAAE,OAAO,CN5W1B,GAAO,CM6W/B,mBAAmC,CAAE,OAAO,CN9V1B,GAAO,CM+VzB,mBAAmC,CAAE,OAAO,CN9P1B,GAAO,CM+PzB,kBAAkC,CAAE,OAAO,CNrJ1B,GAAO,CMsJxB,iBAAiC,CAAE,OAAO,CNe1B,GAAO,CMdvB,uBAAuC,CAAE,OAAO,CN2B1B,GAAO,CM1B7B,sBAAsC,CAAE,OAAO,CNoC1B,GAAO,CMnC5B,mBAAmC,CAAE,OAAO,CNqC1B,GAAO,CMpCzB,oBAAoC,CAAE,OAAO,CN5a1B,GAAO,CM6a1B,0BAA0C,CAAE,OAAO,CN9a1B,GAAO,CM+ahC,kBAAkC,CAAE,OAAO,CN/V1B,GAAO,CMgWxB,eAA+B,CAAE,OAAO,CNoB1B,GAAO,CMnBrB,sBAAsC,CAAE,OAAO,CN8K1B,GAAO,CM7K5B,qBAAqC,CAAE,OAAO,CN/F1B,GAAO,CMgG3B,sBAAsC,CAAE,OAAO,CN6E1B,GAAO,CM5E5B,oBAAoC,CAAE,OAAO,CN9M1B,GAAO,CM+M1B,gBAAgC,CAAE,OAAO,CN+K1B,GAAO,CM9KtB,eAA+B,CAAE,OAAO,CN7H1B,GAAO,CM8HrB,kBAAkC,C
AAE,OAAO,CNnH1B,GAAO,CMoHxB,0CACsC,CAAE,OAAO,CNkI1B,GAAO,CMjI5B,0BAA0C,CAAE,OAAO,CNkI1B,GAAO,CMjIhC,uBAAuC,CAAE,OAAO,CN0K1B,GAAO,CMzK7B,sBAAsC,CAAE,OAAO,CNlI1B,GAAO,CMmI5B,qBAAqC,CAAE,OAAO,CNyK1B,GAAO,CMxK3B,sBAAsC,CAAE,OAAO,CNnI1B,GAAO,CMoI5B,wBAAwC,CAAE,OAAO,CNlI1B,GAAO,CMmI9B,wBAAwC,CAAE,OAAO,CNpI1B,GAAO,CMqI9B,iBAAiC,CAAE,OAAO,CN1G1B,GAAO,CM2GvB,qBAAqC,CAAE,OAAO,CN7Q1B,GAAO,CM8Q3B,4BAA4C,CAAE,OAAO,CN1U1B,GAAO,CM2UlC,sBAAsC,CAAE,OAAO,CNzE1B,GAAO,CM0E5B,mBAAmC,CAAE,OAAO,CNkL1B,GAAO,CMjLzB,iBAAiC,CAAE,OAAO,CNX1B,GAAO,CMYvB,oBAAoC,CAAE,OAAO,CNuJ1B,GAAO,CMtJ1B,qBAAqC,CAAE,OAAO,CNwJ1B,GAAO,CMvJ3B,+BAC8B,CAAE,OAAO,CN/f1B,GAAO,CMggBpB,kBAAkC,CAAE,OAAO,CN4J1B,GAAO,CM3JxB,gBAAgC,CAAE,OAAO,CN8G1B,GAAO,CM7GtB,iBAAiC,CAAE,OAAO,CNwD1B,GAAO,CMvDvB,iBAAiC,CAAE,OAAO,CN9I1B,GAAO,CM+IvB,qCACuC,CAAE,OAAO,CN0L1B,GAAO,CMzL7B,wBAAwC,CAAE,OAAO,CNjH1B,GAAO,CMkH9B,mBAAmC,CAAE,OAAO,CNrH1B,GAAO,CMsHzB,uBAAuC,CAAE,OAAO,CNnW1B,GAAO,CMoW7B,+DAEuC,CAAE,OAAO,CN/gB1B,GAAO,CMghB7B,sDACiD,CAAE,OAAO,CN9gB1B,GAAO,CM+gBvC,4CACuC,CAAE,OAAO,CNlhB1B,GAAO,CMmhB7B,+CAC0C,CAAE,OAAO,CNnhB1B,GAAO,CMohBhC,6CACwC,CAAE,OAAO,CNxhB1B,GAAO,CMyhB9B,wBAAwC,CAAE,OAAO,CN3I1B,GAAO,CM4I9B,mBAAmC,CAAE,OAAO,CN3O1B,GAAO,CM4OzB,uBAAuC,CAAE,OAAO,CNxI1B,GAAO,CMyI7B,yBAAyC,CAAE,OAAO,CNxI1B,GAAO,CMyI/B,sBAAsC,CAAE,OAAO,CNwB1B,GAAO,CMvB5B,wBAAwC,CAAE,OAAO,CNwB1B,GAAO,CMvB9B,iBAAiC,CAAE,OAAO,CN/d1B,GAAO,CMgevB,yBAAyC,CAAE,OAAO,CNle1B,GAAO,CMme/B,gBAAgC,CAAE,OAAO,CNpc1B,GAAO,CMqctB,wBAAwC,CAAE,OAAO,CNljB1B,GAAO,CMmjB9B,sBAAsC,CAAE,OAAO,CNxP1B,GAAO,CMyP5B,iDAC0C,CAAE,OAAO,CNzP1B,GAAO,CM0PhC,gDACyC,CAAE,OAAO,CN7P1B,GAAO,CM8P/B,+CACwC,CAAE,OAAO,CNhQ1B,GAAO,CMiQ9B,oBAAoC,CAAE,OAAO,CNrQ1B,GAAO,CMsQ1B,6CACsC,CAAE,OAAO,CNxR1B,GAAO,CMyR5B,8CACuC,CAAE,OAAO,CN7R1B,GAAO,CM8R7B,0BAA0C,CAAE,OAAO,CN1R1B,GAAO,CM2RhC,wBAAwC,CAAE,OAAO,CNpS1B,GAAO,CMqS9B,uBAAuC,CAAE,OAAO,CN3R1B,GAAO,CM4R7B,yBAAyC,CAAE,OAAO,CN/R1B,GAAO,CMgS/B,uBAAuC,CAAE,OAAO,CNjS1B,GAAO,CMkS7B,oBAAoC,CAAE,OAAO,CN+D1B,GAAO,CM9D1B,qBAAqC,CAAE,OAAO,CN/F1B,GAAO,CMgG3B,2BAA2C,CAAE,OAAO,
CN/b1B,GAAO,CMgcjC,aAA6B,CAAE,OAAO,CNtU1B,GAAO,CMuUnB,oBAAoC,CAAE,OAAO,CNtU1B,GAAO,CMuU1B,sBAAsC,CAAE,OAAO,CNkE1B,GAAO,CMjE5B,wBAAwC,CAAE,OAAO,CNrK1B,GAAO,CMsK9B,+BAA+C,CAAE,OAAO,CNrK1B,GAAO,CMsKrC,qBAAqC,CAAE,OAAO,CN5U1B,GAAO,CM6U3B,sBAAsC,CAAE,OAAO,CNwH1B,GAAO,CMvH5B,iBAAiC,CAAE,OAAO,CNnF1B,GAAO,CMoFvB,iBAAiC,CAAE,OAAO,CNze1B,GAAO,CM0evB,kBAAkC,CAAE,OAAO,CN9W1B,GAAO,CM+WxB,gBAAgC,CAAE,OAAO,CNxK1B,GAAO,CMyKtB,4BAA4C,CAAE,OAAO,CNpQ1B,GAAO,CMqQlC,mCACqC,CAAE,OAAO,CNS1B,GAAO,CMR3B,iBAAiC,CAAE,OAAO,CNjd1B,GAAO,CMkdvB,gBAAgC,CAAE,OAAO,CNzoB1B,GAAO,CM0oBtB,iBAAiC,CAAE,OAAO,CN/nB1B,GAAO,CMgoBvB,0BAA0C,CAAE,OAAO,CN3hB1B,GAAO,CM4hBhC,2BAA2C,CAAE,OAAO,CN9hB1B,GAAO,CM+hBjC,2BAA2C,CAAE,OAAO,CN5hB1B,GAAO,CM6hBjC,2BAA2C,CAAE,OAAO,CNjiB1B,GAAO,CMkiBjC,mBAAmC,CAAE,OAAO,CNpR1B,GAAO,CMqRzB,kBAAkC,CAAE,OAAO,CN5N1B,GAAO,CM6NxB,oBAAoC,CAAE,OAAO,CN5N1B,GAAO,CM6N1B,gBAAgC,CAAE,OAAO,CN/N1B,GAAO,CMgOtB,cAA8B,CAAE,OAAO,CNlO1B,GAAO,CMmOpB,qBAAqC,CAAE,OAAO,CNpe1B,GAAO,CMqe3B,uBAAuC,CAAE,OAAO,CNpe1B,GAAO,CMqe7B,gBAAgC,CAAE,OAAO,CNtS1B,GAAO,CMuStB,gBAAgC,CAAE,OAAO,CNiF1B,GAAO,CMhFtB,oBAAoC,CAAE,OAAO,CNlkB1B,GAAO,CMmkB1B,oBAAoC,CAAE,OAAO,CNrX1B,GAAO,CMsX1B,uBAAuC,CAAE,OAAO,CNpI1B,GAAO,CMqI7B,eAA+B,CAAE,OAAO,CNpc1B,GAAO,CMqcrB,0BAA0C,CAAE,OAAO,CNhe1B,GAAO,CMiehC,mBAAmC,CAAE,OAAO,CNpf1B,GAAO,CMqfzB,eAA+B,CAAE,OAAO,CNlN1B,GAAO,CMmNrB,uBAAuC,CAAE,OAAO,CN1X1B,GAAO,CM2X7B,cAA8B,CAAE,OAAO,CNoD1B,GAAO,CMnDpB,uBAAuC,CAAE,OAAO,CN3J1B,GAAO,CM4J7B,mBAAmC,CAAE,OAAO,CNzN1B,GAAO,CM0NzB,iBAAiC,CAAE,OAAO,CNlH1B,GAAO,CMmHvB,uBAAuC,CAAE,OAAO,CN7L1B,GAAO,CM8L7B,yBAAyC,CAAE,OAAO,CN7L1B,GAAO,CM8L/B,sBAAsC,CAAE,OAAO,CN3C1B,GAAO,CM4C5B,wBAAwC,CAAE,OAAO,CN3C1B,GAAO,CM4C9B,uBAAuC,CAAE,OAAO,CNrG1B,GAAO,CMsG7B,0BAA0C,CAAE,OAAO,CNrG1B,GAAO,CMsGhC,kBAAkC,CAAE,OAAO,CN7U1B,GAAO,CM8UxB,oBAAoC,CAAE,OAAO,CNnlB1B,GAAO,CMolB1B,sBAAsC,CAAE,OAAO,CNnlB1B,GAAO,CMolB5B,kBAAkC,CAAE,OAAO,CN/L1B,GAAO,CMgMxB,qCAAiC,CAAE,OAAO,CNlX1B,GAAO,CMmXvB,qBAAqC,CAAE,OAAO,CNkF1B,GAAO,CMjF3B,kBAAkC,CAAE,OAAO,CNmF1B,GAAO,CMlFxB,iBAAiC,CAAE,OAAO,CN9c1B,GA
AO,CM+cvB,2BAA2C,CAAE,OAAO,CN2B1B,GAAO,CM1BjC,yBAAyC,CAAE,OAAO,CNmE1B,GAAO,CMlE/B,4BAA4C,CAAE,OAAO,CNxK1B,GAAO,CMyKlC,gBAAgC,CAAE,OAAO,CN9lB1B,GAAO,CM+lBtB,4BAA4C,CAAE,OAAO,CNtoB1B,GAAO,CMuoBlC,+BAA+C,CAAE,OAAO,CNqD1B,GAAO,CMpDrC,kBAAkC,CAAE,OAAO,CNxlB1B,GAAO,CMylBxB,sCAAsD,CAAE,OAAO,CN5oB1B,GAAO,CM6oB5C,0EAC8D,CAAE,OAAO,CN9qB1B,GAAO,CM+qBpD,8DAE+B,CAAE,OAAO,CNvf1B,GAAO,CMwfrB,gBAAgC,CAAE,OAAO,CNhY1B,GAAO,CMiYtB,kBAAkC,CAAE,OAAO,CNhY1B,GAAO,CMiYxB,2CACwC,CAAE,OAAO,CN1H1B,GAAO,CM2H9B,qBAAqC,CAAE,OAAO,CNzR1B,GAAO,CM0R3B,iBAAiC,CAAE,OAAO,CNiC1B,GAAO,CMhCvB,wBAAwC,CAAE,OAAO,CNiC1B,GAAO,CMhC9B,mBAAmC,CAAE,OAAO,CNlH1B,GAAO,CMmHzB,yBAAyC,CAAE,OAAO,CNlH1B,GAAO,CMmH/B,0BAA0C,CAAE,OAAO,CNlH1B,GAAO,CMmHhC,qBAAqC,CAAE,OAAO,CNrN1B,GAAO,CMsN3B,sBAAsC,CAAE,OAAO,CNpb1B,GAAO,CMqb5B,gBAAgC,CAAE,OAAO,CNmE1B,GAAO,CMlEtB,oBAAoC,CAAE,OAAO,CNpD1B,GAAO,CMqD1B,6DAC+C,CAAE,OAAO,CNzY1B,GAAO,CM0YrC,qCACuC,CAAE,OAAO,CN7a1B,GAAO,CM8a7B,sBAAsC,CAAE,OAAO,CNtX1B,GAAO,CMuX5B,wBAAwC,CAAE,OAAO,CNlf1B,GAAO,CMmf9B,0BAA0C,CAAE,OAAO,CNlf1B,GAAO,CMmfhC,iBAAiC,CAAE,OAAO,CNtT1B,GAAO,CMuTvB,uBAAuC,CAAE,OAAO,CNptB1B,GAAO,CMqtB7B,yBAAyC,CAAE,OAAO,CNptB1B,GAAO,CMqtB/B,wCACuC,CAAE,OAAO,CNrtB1B,GAAO,CMstB7B,4CACyC,CAAE,OAAO,CNttB1B,GAAO,CMutB/B,sBAAsC,CAAE,OAAO,CNJ1B,GAAO,CMK5B,wBAAwC,CAAE,OAAO,CNJ1B,GAAO,CMK9B,iBAAiC,CAAE,OAAO,CNH1B,GAAO,CMIvB,mBAAmC,CAAE,OAAO,CN3W1B,GAAO,CM4WzB,6CACkC,CAAE,OAAO,CN5W1B,GAAO,CM6WxB,iDACoC,CAAE,OAAO,CN7W1B,GAAO,CM8W1B,gBAAgC,CAAE,OAAO,CNtN1B,GAAO,CMuNtB,yBAAyC,CAAE,OAAO,CN3b1B,GAAO,CM4b/B,mBAAmC,CAAE,OAAO,CNtF1B,GAAO,CMuFzB,2EAE2C,CAAE,OAAO,CNxE1B,GAAO,CMyEjC,8DACqD,CAAE,OAAO,CNvE1B,GAAO,CMwE3C,oDAC2C,CAAE,OAAO,CN3E1B,GAAO,CM4EjC,uDAC8C,CAAE,OAAO,CN5E1B,GAAO,CM6EpC,qDAC4C,CAAE,OAAO,CNjF1B,GAAO,CMkFlC,iBAAiC,CAAE,OAAO,CN3K1B,GAAO,CM4KvB,iDAE+B,CAAE,OAAO,CNzrB1B,GAAO,CM0rBrB,kBAAkC,CAAE,OAAO,CNlP1B,GAAO,CMmPxB,0BAA0C,CAAE,OAAO,CNK1B,GAAO,CMJhC,0BAA0C,CAAE,OAAO,CNK1B,GAAO,CMJhC,yBAAyC,CAAE,OAAO,CNK1B,GAAO,CMJ/B,kDACuC,CAAE,OAAO,CND1B,GAAO,CME7B,sDACyC,CAAE,OAAO,CNF1B,GAAO,CMG/B,mBAAmC
,CAAE,OAAO,CNxsB1B,GAAO,CMysBzB,eAA+B,CAAE,OAAO,CNpb1B,GAAO,CMqbrB,eAA+B,CAAE,OAAO,CN1hB1B,GAAO,CM2hBrB,eAA+B,CAAE,OAAO,CNxY1B,GAAO,CMyYrB,kBAAkC,CAAE,OAAO,CN/O1B,GAAO,CMgPxB,kBAAkC,CAAE,OAAO,CNziB1B,GAAO,CM0iBxB,oBAAoC,CAAE,OAAO,CNjU1B,GAAO,CMkU1B,sBAAsC,CAAE,OAAO,CN7K1B,GAAO,CM8K5B,sBAAsC,CAAE,OAAO,CNhI1B,GAAO,CMiI5B,qBAAqC,CAAE,OAAO,CNJ1B,GAAO,CMK3B,iBAAiC,CAAE,OAAO,CNxU1B,GAAO,COzcvB,QAAS,CH8BP,QAAQ,CAAE,QAAQ,CAClB,KAAK,CAAE,GAAG,CACV,MAAM,CAAE,GAAG,CACX,OAAO,CAAE,CAAC,CACV,MAAM,CAAE,IAAI,CACZ,QAAQ,CAAE,MAAM,CAChB,IAAI,CAAE,gBAAa,CACnB,MAAM,CAAE,CAAC,CAUT,kDACQ,CACN,QAAQ,CAAE,MAAM,CAChB,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,IAAI,CACZ,MAAM,CAAE,CAAC,CACT,QAAQ,CAAE,OAAO,CACjB,IAAI,CAAE,IAAI,CIvDd,swBAAK,CACH,WAAW,CAAE,OAAO,CACpB,y5BAAQ,CACN,WAAW,CC+BuB,aAAa,CD9B/C,OAAO,CAAE,YAAY,CACrB,UAAU,CAAE,MAAM,CAClB,WAAW,CAAE,MAAM,CACnB,WAAW,CAAE,CAAC,CACd,eAAe,CAAE,OAAO,CAM5B,86BAAkB,CAChB,OAAO,CAAE,YAAY,CACrB,eAAe,CAAE,OAAO,CAGxB,muEAAgB,CACd,OAAO,CAAE,MAAM,CACf,2wEAAuB,CACrB,WAAW,CAAE,KAAI,CACnB,utEAAsB,CACpB,OAAO,CAAE,YAAY,CAE3B,2iBAA2B,CACzB,OAAO,CAAE,GAAE,CjBpBL,kBAAoB,CAAE,qBAAM,CAK5B,eAAiB,CAAE,qBAAM,CAezB,UAAY,CAAE,qBAAM,CiBE5B,+nBAAiC,CAC/B,OAAO,CAAE,CAAC,CAGV,mtCAAuB,CACrB,SAAS,CAAE,IAAI,CACf,cAAc,CAAE,IAAI,CEpBxB,0PAAS,CACP,OAAO,CAAE,IAAqB,CAC9B,WAAW,CDayB,IAAI,CCZxC,aAAa,CDYuB,IAAI,CCXxC,UAAU,CAAE,OAAmB,CAEjC,8CAAe,CACb,KAAK,CCe+B,IAAM,CDd1C,WAAW,CAAE,IAAI,CACjB,OAAO,CAAE,KAAK,CACd,KAAK,CCY+B,IAAM,CDX1C,UAAU,CAAE,OAAkB,CAC9B,MAAM,CAAE,KAAsB,CAC9B,OAAO,CAAE,QAA2C,CACpD,aAAa,CAAE,IAAqB,CAEtC,0ZAAyB,CACvB,UAAU,CAAE,OAAkB,CAC9B,mxCAAe,CACb,UAAU,CAAE,OAAiB,CACjC,kYAA0B,CACxB,UAAU,CAAE,OAAmB,CAC/B,ouCAAe,CACb,UAAU,CAAE,OAAoB,CAEpC,sYAAuB,CACrB,UAAU,CAAE,OAAmB,CAC/B,yuCAAe,CACb,UAAU,CAAE,OAAkB,CAElC,mZAA0B,CACxB,UAAU,CAAE,OAAuB,CACnC,swCAAe,CACb,UAAU,CAAE,OAAqB,CAErC,scAA0B,CACxB,UAAU,CCF0B,OAAmB,CDGvD,42CAAe,CACb,KAAK,CCpB6B,OAAW,CDqB7C,UAAU,CCHwB,OAAmB,CDIvD,8dAAC,CACC,KAAK,CCb6B,OAAK,CDe3C,sZAAsB,CACpB,aAAa,CAAE,CAAC,CAsBlB,kBAAkB,CAChB,QAAQ,CAAE,KAAK,CACf,MAAM,CAAE,GAAG,CA
CX,IAAI,CAAE,CAAC,CACP,OAAO,CDG6B,GAAG,CCFvC,qBAAE,CACA,OAAO,CAAE,KAAK,CACd,KAAK,CDT6B,KAAK,CCUvC,UAAU,CAAE,WAAW,CACvB,KAAK,CCrD6B,IAAM,CDsDxC,UAAU,CAAE,MAAM,CAClB,UAAU,CAAE,2BAA0B,CACtC,OAAO,CAAE,MAAmB,CAC5B,SAAS,CAAE,GAAG,CACd,OAAO,CAAE,CAAC,CACV,MAAM,CAAE,CAAC,CACT,WAAW,CAAE,IAAI,CACjB,QAAQ,CAAE,MAAM,CnB3FZ,kBAAoB,CAAE,gBAAM,CAK5B,eAAiB,CAAE,gBAAM,CAezB,UAAY,CAAE,gBAAM,CmByExB,0CAAsB,CACpB,UAAU,CC5FsB,OAAM,CD6FxC,uCAAmB,CACjB,UAAU,CC5DsB,OAAK,CD6DvC,0CAAsB,CACpB,UAAU,CDnFsB,OAAO,CCoFzC,yCAAqB,CACnB,UAAU,CDtEsB,OAAI,CCuEtC,wBAAI,CACF,OAAO,CAAE,CAAC,CACV,MAAM,CAAE,IAAI,CEhFd,oCAAsB,CFmFxB,kBAAkB,CAChB,MAAM,CAAE,IAAI,CACZ,GAAG,CAAE,CAAC,CACN,KAAK,CAAE,IAAI,CACX,qBAAE,CACA,KAAK,CAAE,IAAI,EG3FjB,MAAM,CACJ,SAAS,CAAE,IAAI,CACf,MAAM,CAAE,CAAC,CACT,cAAc,CAAE,QAAQ,CACxB,eAAe,CAAE,MAAM,CACvB,MAAM,CAAE,OAAO,CACf,WAAW,CAAE,MAAM,CACnB,kBAAkB,CAAE,MAAM,CAC1B,SAAS,CAAE,OAAO,CACpB,gDAAiD,CAC/C,MAAM,CAAE,CAAC,CACT,OAAO,CAAE,CAAC,CACZ,gBAAgB,CACd,MAAM,CAAE,OAAO,CAEjB,IAAI,CAEF,OAAO,CAAE,YAAY,CACrB,aAAa,CAAE,GAAG,CAClB,WAAW,CAAE,MAAM,CACnB,WAAW,CAAE,MAAM,CACnB,UAAU,CAAE,MAAM,CAClB,MAAM,CAAE,OAAO,CACf,SAAS,CAAE,IAAI,CACf,OAAO,CAAE,iBAA6F,CACtG,KAAK,CFf+B,IAAM,CEgB1C,MAAM,CAAE,yBAAyB,CACjC,gBAAgB,CF7CoB,OAAM,CE8C1C,eAAe,CAAE,IAAI,CACrB,WAAW,CAAE,MAAM,CACnB,WAAW,CFDyB,uDAA2D,CEE/F,UAAU,CAAE,mFAAqF,CACjG,YAAY,CAAE,KAAK,CACnB,cAAc,CAAE,MAAM,CACtB,QAAQ,CAAE,MAAM,CAChB,IAAI,CAAE,CAAC,CACP,iBAAiB,CAAE,IAAI,CtBxDjB,mBAAoB,CsByDb,IAAI,CtBpDX,gBAAiB,CsBoDV,IAAI,CtB/CX,eAAgB,CsB+CT,IAAI,CtBrCX,WAAY,CsBqCL,IAAI,CtBzDX,kBAAoB,CAAE,eAAM,CAK5B,eAAiB,CAAE,eAAM,CAezB,UAAY,CAAE,eAAM,CsByC5B,UAAU,CACR,UAAU,CAAE,OAAwB,CACpC,KAAK,CFjC+B,IAAM,CEoC1C,UAAO,CACL,UAAU,CAAE,OAAqC,CACjD,KAAK,CFtC6B,IAAM,CEuC1C,UAAO,CACL,UAAU,CAAE,OAAqC,CACjD,OAAO,CAAE,CAAC,CACZ,WAAQ,CACN,UAAU,CAAE,6EAA+E,CAC3F,OAAO,CAAE,iBAA6F,CACxG,YAAS,CACP,KAAK,CF9C6B,IAAM,CE+C1C,aAAU,CACR,gBAAgB,CAAE,IAAI,CACtB,MAAM,CAAE,2DAA2D,CACnE,MAAM,CAAE,iBAAmB,CAC3B,OAAO,CAAE,GAAG,CACZ,MAAM,CAAE,WAAW,CACnB,UAAU,CAAE,IAAI,CAEpB,aAAa,CACX,gBAAgB,CAAE,I
AAI,CACtB,MAAM,CAAE,2DAA2D,CACnE,MAAM,CAAE,iBAAmB,CAC3B,OAAO,CAAE,GAAG,CACZ,MAAM,CAAE,WAAW,CACnB,UAAU,CAAE,IAAI,CAChB,4DAA0B,CACxB,gBAAgB,CAAE,IAAI,CACtB,MAAM,CAAE,2DAA2D,CACnE,MAAM,CAAE,iBAAmB,CAC3B,OAAO,CAAE,GAAI,CACb,MAAM,CAAE,WAAW,CACnB,UAAU,CAAE,IAAI,CAGpB,sBAAsB,CACpB,OAAO,CAAE,CAAC,CACV,MAAM,CAAE,CAAC,CAEX,UAAU,CACR,SAAS,CAAE,GAAG,CAEhB,SAAS,CACP,gBAAgB,CAAE,kBAAgB,CAClC,eAAO,CACL,gBAAgB,CAAE,kBAA6B,CAEnD,YAAY,CACV,gBAAgB,CAAE,kBAA2C,CAC7D,KAAK,CAAE,kBAAsB,CAC7B,kBAAO,CACL,gBAAgB,CAAE,kBAAuD,CACzE,KAAK,CF5F6B,OAAW,CE6F/C,oBAAS,CACP,KAAK,CAAE,kBAAsB,CAEjC,YAAY,CACV,gBAAgB,CAAE,kBAAiB,CACnC,kBAAO,CACL,gBAAgB,CAAE,eAA6B,CAEnD,WAAW,CACT,gBAAgB,CAAE,kBAAe,CACjC,iBAAO,CACL,gBAAgB,CAAE,kBAA4B,CAElD,YAAY,CACV,gBAAgB,CAAE,kBAAkB,CACpC,kBAAO,CACL,gBAAgB,CAAE,kBAA+B,CACrD,WAAW,CACT,gBAAgB,CJvIoB,IAAI,CIwIxC,iBAAO,CACL,gBAAgB,CAAE,kBAAoC,CAE1D,SAAS,CACP,gBAAgB,CAAE,sBAAsB,CACxC,KAAK,CF3G+B,OAAK,CE4GzC,UAAU,CAAE,IAAI,CAChB,YAAY,CAAE,sBAAsB,CACpC,eAAO,CACL,gBAAgB,CAAE,sBAAsB,CACxC,KAAK,CAAE,kBAAoC,CAC3C,UAAU,CAAE,IAAI,CAClB,gBAAQ,CACN,gBAAgB,CAAE,sBAAsB,CACxC,KAAK,CAAE,kBAAoC,CAC3C,UAAU,CAAE,IAAI,CAClB,iBAAS,CACP,KAAK,CFtH6B,OAAO,CEwH7C,mCAAoC,CAClC,cAAc,CAAE,MAAM,CAExB,aAAa,CACX,aAAa,CJ1IuB,IAAI,ChBuExC,KAAK,CAAE,CAAC,CACR,wCAAS,CAEP,OAAO,CAAE,KAAK,CACd,OAAO,CAAE,EAAE,CACb,mBAAO,CACL,KAAK,CAAE,IAAI,CqB3Ff,YAAY,CACV,QAAQ,CAAE,QAAQ,CAClB,OAAO,CAAE,YAAY,CAIvB,qCAAqC,CACnC,OAAO,CAAE,KAAK,CAChB,iBAAiB,CACf,QAAQ,CAAE,QAAQ,CAClB,IAAI,CAAE,CAAC,CACP,OAAO,CAAE,IAAI,CACb,KAAK,CAAE,IAAI,CACX,GAAG,CAAE,IAAI,CACT,SAAS,CAAE,IAAI,CACf,UAAU,CHW0B,OAAyB,CGV7D,OAAO,CLmD6B,GAAG,CKlDvC,MAAM,CAAE,iBAAgC,CACxC,UAAU,CAAE,2BAA0B,CACtC,OAAO,CAAE,IAAqB,CAC9B,sBAAQ,CACN,OAAO,CAAE,KAAK,CACd,KAAK,CAAE,IAAI,CACX,KAAK,CHN6B,OAAW,CGO7C,WAAW,CAAE,MAAM,CACnB,SAAS,CAAE,GAAG,CACd,OAAO,CAAE,MAAuB,CAChC,MAAM,CAAE,OAAO,CACf,4BAAO,CACL,UAAU,CHFsB,OAAK,CGGrC,KAAK,CHT2B,IAAM,CGU1C,4BAAY,CACV,UAAU,CAAE,iBAAgC,CAC5C,MAAM,CAAE,KAAuB,CACjC,2BAAW,CACT,cAAc,CAAE,IAAqB,CACrC,gDAAoB,CAClB,KAAK,CAAE,IAAI,CACf,mCAAmB,CACjB
,UAAU,CAAE,OAA4B,CACxC,cAAc,CAAE,SAAS,CACzB,WAAW,CAAE,GAAG,CAChB,SAAS,CAAE,GAAG,CACd,yCAAO,CACL,UAAU,CAAE,OAA4B,CAC1C,wCAAI,CACF,KAAK,CHzB2B,IAAM,CG2B5C,6CAA6C,CAC3C,MAAM,CAAE,IAAI,CACZ,GAAG,CAAE,IAAI,CACT,IAAI,CAAE,IAAI,CACV,KAAK,CAAE,CAAC,CAGR,iDAAiB,CACf,UAAU,CH9BwB,OAAyB,CG+B3D,UAAU,CAAE,GAAG,CACjB,mDAAmB,CACjB,OAAO,CAAE,QAA2C,CACpD,yDAAO,CACL,UAAU,CHlCsB,OAAK,CGmCrC,KAAK,CHzC2B,IAAM,CG2C5C,+CAA+C,CAC7C,KAAK,CAAE,CAAC,CACR,IAAI,CAAE,IAAI,CACV,UAAU,CAAE,KAAK,CAGjB,yBAAQ,CACN,OAAO,CAAE,GAAG,CACZ,aAAa,CAAE,iBAA0B,CACzC,WAAW,CAAE,qBAAqB,CAClC,YAAY,CAAE,qBAAqB,CACnC,QAAQ,CAAE,QAAQ,CAClB,OAAO,CAAE,KAAK,CACd,GAAG,CAAE,IAAI,CACT,IAAI,CAAE,GAAG,CACT,WAAW,CAAE,IAAI,CACnB,gDAA+B,CAC7B,IAAI,CAAE,IAAI,CCtEZ,uBAAM,CACJ,OAAO,CAAE,KAAK,CAEhB,gIAA+C,CAC7C,OAAO,CAAE,YAAY,CACrB,QAAQ,CAAE,MAAM,CAChB,KAAK,CAAE,CAAC,CACR,cAAc,CAAE,MAAM,CAItB,wCAAO,CACL,OAAO,CAAE,YAAY,CACrB,cAAc,CAAE,MAAM,CACtB,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,YAA+C,CACvD,KAAK,CAAE,IAAI,CACf,4BAAW,CACT,KAAK,CAAE,IAAI,CACX,kCAAK,CACH,OAAO,CAAE,KAAK,CAChB,mCAAM,CACJ,UAAU,CAAE,GAAqB,CAEvC,QAAQ,CACN,MAAM,CAAE,CAAC,CACT,MAAM,CAAE,CAAC,CACT,OAAO,CAAE,CAAC,CACZ,MAAM,CACJ,OAAO,CAAE,KAAK,CACd,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,CAAC,CACT,OAAO,CAAE,CAAC,CACV,WAAW,CAAE,MAAM,CACnB,aAAa,CN/BuB,IAAI,CMgCxC,SAAS,CAAE,IAAI,CACf,YAAY,CAAE,IAAI,CACpB,KAAK,CACH,OAAO,CAAE,KAAK,CACd,MAAM,CAAE,aAAa,CACrB,KAAK,CNR+B,IAAU,CMS9C,SAAS,CAAE,GAAG,CAEhB,qBAAuB,CACrB,SAAS,CAAE,IAAI,CACf,MAAM,CAAE,CAAC,CACT,cAAc,CAAE,QAAQ,CACxB,eAAe,CAAE,MAAM,CAGzB,iBAAiB,CACf,aAAa,CNhDuB,IAAI,ChBuExC,KAAK,CAAE,CAAC,CuBrGR,SAAS,CCCC,IAAQ,CDChB,WAAI,CAAE,IAAI,CACV,YAAK,CAAE,IAAI,CvBkGb,KAAK,CAAE,CAAC,CACR,gDAAS,CAEP,OAAO,CAAE,KAAK,CACd,OAAO,CAAE,EAAE,CACb,uBAAO,CACL,KAAK,CAAE,IAAI,CALb,gDAAS,CAEP,OAAO,CAAE,KAAK,CACd,OAAO,CAAE,EAAE,CACb,uBAAO,CACL,KAAK,CAAE,IAAI,CsBzBf,uDAAyD,CACvD,OAAO,CAAE,IAAI,CACb,KAAK,CN/C+B,OAAI,CMoDxC,mGAA+C,CAC7C,cAAc,CAAE,IAAqB,CACrC,wHAAM,CACJ,KAAK,CAAE,IAAI,CAEX,0tEAAqP,CACnP,KAAK,CAAE,IAAI,CACnB,+BAA+B,CGlF3B,KAAK,CAAE,IAAsB,CAG3B,OAAO,CAAE,KAAK,C
Aed,YAAoB,CAAE,QAA+B,CACrD,KAAK,CAAE,IAAuC,CCnB5C,YAAoB,CAAE,CAAC,CDqBzB,0CAAa,CACX,YAAoB,CAAE,CAAC,CHgE/B,iCAAiC,CGtF7B,KAAK,CAAE,IAAsB,CAG3B,OAAO,CAAE,KAAK,CAed,YAAoB,CAAE,QAA+B,CACrD,KAAK,CAAE,SAAuC,CAE9C,4CAAa,CACX,YAAoB,CAAE,CAAC,CCA7B,iDAAwB,CACtB,YAAoB,CAAE,CAAC,CAEvB,mDAA0B,CACxB,KAAK,CALY,IAAkC,CJqEzD,iCAAiC,CG1F7B,KAAK,CAAE,IAAsB,CAG3B,OAAO,CAAE,KAAK,CAed,YAAoB,CAAE,QAA+B,CACrD,KAAK,CAAE,SAAuC,CAE9C,4CAAa,CACX,YAAoB,CAAE,CAAC,CCA7B,iDAAwB,CACtB,YAAoB,CAAE,CAAC,CAEvB,mDAA0B,CACxB,KAAK,CALY,IAAkC,CJ0EzD,uDAAuD,CACrD,MAAM,CAAE,SAA2B,CACnC,SAAS,CAAE,GAAG,CAEhB,oBAAoB,CAClB,OAAO,CAAE,YAAY,CACrB,MAAM,CAAE,SAA2B,CACnC,SAAS,CAAE,GAAG,CAOZ,osBAAqP,CACnP,KAAK,CAAE,IAAI,CAIjB,uBAAuB,CACrB,OAAO,CAAE,YAAY,CACrB,YAAY,CAAE,KAAK,CACnB,KAAK,CAAE,IAAI,CACX,cAAc,CAAE,MAAM,CACtB,SAAS,CAAE,GAAG,CAEhB,gBAAgB,CACd,OAAO,CAAE,KAAK,CACd,KAAK,CN7H+B,IAAI,CM8HxC,SAAS,CAAE,GAAG,CACd,UAAU,CAAE,OAAO,CACnB,UAAU,CAAE,MAAM,CAClB,kBAAC,CACC,SAAS,CAAE,OAAO,CAClB,UAAU,CAAE,MAAM,CAClB,aAAa,CAAE,GAAqB,CACtC,6BAAY,CACV,aAAa,CAAE,CAAC,CA4DpB,KAAK,CACH,WAAW,CAAE,MAAM,CAGnB,6DAAmD,CACjD,kBAAkB,CAAE,MAAM,CAC1B,MAAM,CAAE,OAAO,CACf,WAAW,CJ7JuB,uDAA2D,CI8J7F,SAAS,CAAE,OAAO,CACpB,gSAAqP,CACnP,kBAAkB,CAAE,IAAI,CACxB,OAAO,CAAE,GAAqB,CAC9B,OAAO,CAAE,YAAY,CACrB,MAAM,CAAE,cAA6B,CACrC,SAAS,CAAE,GAAG,CACd,WAAW,CJrKuB,uDAA2D,CIsK7F,UAAU,CAAE,oBAAmC,CAC/C,aAAa,CAAE,CAAC,CxBxNZ,kBAAoB,CAAE,kBAAM,CAK5B,eAAiB,CAAE,kBAAM,CAezB,UAAY,CAAE,kBAAM,CwBuM1B,4BAAwB,CACtB,OAAO,CAAE,eAAkB,CAC7B,eAAW,CACT,MAAM,CAAE,OAAO,CACjB,0CAAmC,CxB/N7B,kBAAoB,CwBgOZ,UAAU,CxB3NlB,eAAiB,CwB2NT,UAAU,CxB5MlB,UAAY,CwB4MJ,UAAU,CACtB,OAAO,CAAE,CAAC,CACV,YAAY,CAAE,OAAO,CACrB,OAAO,CAAE,IAAI,CACb,MAAM,CAAE,IAAI,CACd,oBAAgB,CxBrOV,kBAAoB,CwBsOZ,UAAU,CxBjOlB,eAAiB,CwBiOT,UAAU,CxBlNlB,UAAY,CwBkNJ,UAAU,CACtB,kGAA6D,CAC3D,kBAAkB,CAAE,IAAI,CAC5B,oXAAyU,CACvU,OAAO,CAAE,CAAC,CACV,OAAO,CAAE,cAAc,CACvB,YAAY,CNxLsB,IAAU,CMyL9C,oBAAgB,CACd,YAAY,CAAE,eAA8B,CAC9C,+EAAqE,CACnE,OAAO,CAAE,gBAAsB,CAC/B,OAAO,CAAE,gBAAgB,CAC3B,4aAAiY,CAC/X,MAAM,CAAE,WAAW,CACnB,gBAAgB,
CAAE,OAAmC,CAEzD,+DAAiE,CAC/D,KAAK,CNzN+B,OAAI,CM0NxC,MAAM,CAAE,iBAAc,CACxB,iFAAmF,CACjF,YAAY,CN5NwB,OAAI,CM8NxC,yHAA+G,CAC7G,aAAa,CN/NqB,OAAI,CMiO1C,oBAAoB,CAClB,OAAO,CAAE,IAAqB,CAC9B,SAAS,CAAE,IAAI,CAKjB,QAAQ,CACN,QAAQ,CAAE,IAAI,CACd,cAAc,CAAE,GAAG,CACnB,KAAK,CAAE,IAAI,CACX,WAAW,CJzNyB,uDAA2D,CI0NjG,eAAgB,CACd,OAAO,CAAE,WAAgB,CACzB,OAAO,CAAE,YAAY,CACrB,MAAM,CAAE,cAA6B,CACrC,SAAS,CAAE,GAAG,CACd,UAAU,CAAE,oBAAmC,CxBhRzC,kBAAoB,CAAE,kBAAM,CAK5B,eAAiB,CAAE,kBAAM,CAezB,UAAY,CAAE,kBAAM,CwB+P5B,MAAM,CACJ,MAAM,CAAE,cAA6B,CACrC,gBAAgB,CJvPoB,IAAM,CIwP1C,gBAAW,CACT,MAAM,CAAE,IAAI,CAChB,2BAA4B,CAC1B,OAAO,CAAE,CAAC,CACZ,uFAA2F,CACzF,MAAM,CAAE,WAAW,CACnB,gBAAgB,CAAE,OAAmC,CAKrD,8DAAuD,CACrD,MAAM,CAAE,WAAW,CACvB,sBAAuB,CACrB,MAAM,CAAE,KAAuB,CAE/B,KAAK,CJ5Q+B,OAAW,CI6Q/C,OAAO,CAAE,KAAK,CACd,kCAAK,CACH,cAAc,CAAE,QAAQ,CAI5B,uBAAuB,CACrB,OAAO,CAAE,YAAY,CACrB,QAAQ,CAAE,MAAM,CAChB,KAAK,CAAE,CAAC,CACR,cAAc,CAAE,MAAM,CAuBxB,iCAAkC,CAChC,WAAW,CAAE,MAAM,CACnB,OAAO,CAAE,GAAqB,CAC9B,qEAAiB,CACf,WAAW,CAAE,IAAI,CACjB,OAAO,CAAE,KAAK,CACd,OAAO,CAAE,YAAY,CACrB,SAAS,CAAE,GAAG,CACd,gBAAgB,CJtSkB,OAAmB,CIuSrD,MAAM,CAAE,cAA6B,CACrC,KAAK,CN7U6B,IAAI,CM+U1C,kCAAkC,CAChC,WAAW,CAAE,CAAC,CAChB,kCAAkC,CAChC,YAAY,CAAE,CAAC,CAcjB,UAAU,CACR,QAAQ,CAAE,QAAQ,CAClB,OAAO,CAAE,KAAK,CACd,MAAM,CNjV8B,IAAI,CMkVxC,UAAU,CAAE,IAAqB,CACjC,MAAM,CAAE,OAAO,CACf,iBAAQ,CACN,QAAQ,CAAE,QAAQ,CAClB,OAAO,CAAE,EAAE,CACX,OAAO,CAAE,KAAK,CACd,IAAI,CAAE,CAAC,CACP,GAAG,CAAE,CAAC,CACN,KAAK,CAAE,IAAuB,CAC9B,MAAM,CAAE,IAAqB,CAC7B,aAAa,CAAE,GAAG,CAClB,UAAU,CN9WwB,IAAI,ClBNlC,kBAAoB,CAAE,oBAAM,CAK5B,eAAiB,CAAE,oBAAM,CAezB,UAAY,CAAE,oBAAM,CwBkW1B,gBAAO,CACL,QAAQ,CAAE,QAAQ,CAClB,OAAO,CAAE,EAAE,CACX,OAAO,CAAE,KAAK,CACd,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,IAAI,CACZ,aAAa,CAAE,GAAG,CAClB,UAAU,CNxXwB,IAAI,CMyXtC,IAAI,CAAE,IAAI,CACV,GAAG,CAAE,IAAI,CxB/XL,kBAAoB,CAAE,oBAAM,CAK5B,eAAiB,CAAE,oBAAM,CAezB,UAAY,CAAE,oBAAM,CwB6W1B,eAAI,CACF,QAAQ,CAAE,QAAQ,CAClB,IAAI,CAAE,IAAqB,CAC3B,OAAO,CAAE,KAAK,CACd,SAAS,CAAE,IAAI,CACf,KAAK,CNhY6B,IAAI,CMiYtC,WAAW,CAAE,CAAC,
CAEhB,wBAAQ,CACN,UAAU,CAAE,OAAmB,CACjC,uBAAO,CACL,IAAI,CNrX8B,IAAI,CMsXtC,UAAU,CJ3YwB,OAAM,CI6Y5C,mBAAmB,CACjB,MAAM,CAAE,WAAW,CACnB,OAAO,CAAE,GAAE,CAgDX,wGAAyB,CACvB,KAAK,CNpa6B,OAAI,CMsatC,81BAAqP,CACnP,MAAM,CAAE,iBAAc,CAC1B,iDAAQ,CACN,MAAM,CAAE,iBAAc,CAE1B,mBAAmB,CACjB,WAAW,CAAE,MAAM,CACnB,qCAAiB,CACf,OAAO,CAAE,WAAgB,CACzB,OAAO,CAAE,YAAY,CACrB,SAAS,CAAE,GAAG,CAClB,gEAAgE,CAC9D,KAAK,CJ9c+B,OAAM,CIid5C,+DAA+D,CAC7D,KAAK,CNtb+B,OAAI,CMyb1C,gEAAgE,CAC9D,KAAK,CNzc+B,OAAO,CM4c7C,6DAA6D,CAC3D,KAAK,CJxb+B,OAAK,CI8b3C,UAAU,CxBleF,iBAAoB,CAAE,aAAM,CAK5B,cAAiB,CAAE,aAAM,CAKzB,aAAgB,CAAE,aAAM,CAKxB,YAAe,CAAE,aAAM,CAKvB,SAAY,CAAE,aAAM,CwBgd5B,WAAW,CxBpeH,iBAAoB,CAAE,cAAM,CAK5B,cAAiB,CAAE,cAAM,CAKzB,aAAgB,CAAE,cAAM,CAKxB,YAAe,CAAE,cAAM,CAKvB,SAAY,CAAE,cAAM,CwBkd5B,WAAW,CxBteH,iBAAoB,CAAE,cAAM,CAK5B,cAAiB,CAAE,cAAM,CAKzB,aAAgB,CAAE,cAAM,CAKxB,YAAe,CAAE,cAAM,CAKvB,SAAY,CAAE,cAAM,CwBod5B,OAAO,CxBxeC,iBAAoB,CAAE,UAAM,CAK5B,cAAiB,CAAE,UAAM,CAKzB,aAAgB,CAAE,UAAM,CAKxB,YAAe,CAAE,UAAM,CAKvB,SAAY,CAAE,UAAM,CwBsd1B,iBAAW,CxB1eL,iBAAoB,CwB2eL,wBAAwB,CxBtevC,cAAiB,CwBseF,wBAAwB,CxBjevC,aAAgB,CwBieD,wBAAwB,CxB5dvC,YAAe,CwB4dA,wBAAwB,CxBvdvC,SAAY,CwBudG,wBAAwB,CAC7C,kBAAY,CxB5eN,iBAAoB,CwB6eL,yBAAyB,CxBxexC,cAAiB,CwBweF,yBAAyB,CxBnexC,aAAgB,CwBmeD,yBAAyB,CxB9dxC,YAAe,CwB8dA,yBAAyB,CxBzdxC,SAAY,CwBydG,yBAAyB,CAC9C,kBAAY,CxB9eN,iBAAoB,CwB+eL,yBAAyB,CxB1exC,cAAiB,CwB0eF,yBAAyB,CxBrexC,aAAgB,CwBqeD,yBAAyB,CxBhexC,YAAe,CwBgeA,yBAAyB,CxB3dxC,SAAY,CwB2dG,yBAAyB,CAEhD,yCAAyC,CAErC,8BAAqB,CACnB,MAAM,CAAE,SAAS,CAEjB,8ZAAqP,CACnP,aAAa,CAAE,KAAK,CACpB,OAAO,CAAE,KAAK,CAClB,cAAK,CACH,aAAa,CAAE,KAAK,CACpB,OAAO,CAAE,KAAK,CAEhB,kYAAqO,CACnO,aAAa,CAAE,CAAC,CAElB,wCAAuB,CACrB,aAAa,CAAE,KAAK,CACpB,UAAU,CAAE,IAAI,CAChB,OAAO,CAAE,KAAK,CACd,KAAK,CAAE,IAAI,CACb,4BAAW,CACT,MAAM,CAAE,WAAW,CACvB,iEAAmE,CACjE,OAAO,CAAE,KAAK,CACd,SAAS,CAAE,GAAG,CACd,OAAO,CAAE,KAAuB,EHnfhC,oCAAsB,CQhC1B,YAAY,CAER,OAAO,CAAE,IAAI,ER8Bb,oCAAsB,CQ5B1B,YAAY,CAER,OAAO,CAAE,IAAI,EAEjB,WAAW,CACT,KAAK,CAAE,IAAI,CAEb,YAAY,CACV,KAAK,CAAE,KAAK,CAEd,
WAAW,CACT,KAAK,CAAE,IAAI,CC4Cb,mEAAS,CACP,eAAe,CAAE,QAAQ,CACzB,cAAc,CAAE,CAAC,CACjB,WAAW,CAAE,IAAI,CACjB,aAAa,CZ/BuB,IAAI,CYgCxC,2FAAO,CACL,KAAK,CAAE,IAAI,CACX,IAAI,CAAE,6BAA8B,CACpC,OAAO,CAAE,KAAK,CACd,UAAU,CAAE,MAAM,CACpB,yJAAM,CACJ,SAAS,CZjByB,GAAG,CYkBrC,MAAM,CAAE,CAAC,CACT,QAAQ,CAAE,OAAO,CACjB,OAAO,CZnB2B,QAAmC,CYoBvE,iOAA8B,CAC5B,iBAAiB,CAAE,CAAC,CACtB,qFAAK,CACH,KAAK,CAAE,IAAI,CACX,UAAU,CAAE,IAAI,CAChB,cAAc,CAAE,MAAM,CACtB,WAAW,CAAE,MAAM,CACnB,8FAAE,CACA,WAAW,CZnDqB,IAAI,CYoDpC,aAAa,CAAE,iBAA6B,CAChD,4EAAE,CACA,gBAAgB,CAAE,WAAW,CAC7B,cAAc,CAAE,MAAM,CAE1B,kFAAc,CACZ,WAAW,CAAE,IAAuB,CACpC,mHAAY,CACV,aAAa,CAAE,CAAC,CACpB,4HAA4B,CAC1B,KAAK,CAAE,EAAE,CACT,aAAa,CAAE,CAAC,CAChB,uXAA0C,CACxC,MAAM,CAAE,CAAC,CAEb,mBAAmB,CACjB,KAAK,CV9D+B,IAAY,CU+DhD,SAAS,CAAE,GAAG,CAChB,kBAAkB,CAChB,KAAK,CVjE+B,IAAY,CUkEhD,SAAS,CAAE,GAAG,CAIhB,2HAAyD,CACvD,gBAAgB,CVzDoB,OAAmB,CU2DzD,gBAAgB,CACd,gBAAgB,CV5DoB,OAAmB,CUiEzD,kDAAsB,CACpB,MAAM,CAAE,iBAA6B,CACrC,wDAAE,CACA,aAAa,CAAE,iBAA6B,CAC5C,WAAW,CAAE,iBAA6B,CAC5C,gGAAwB,CACtB,mBAAmB,CAAE,CAAC,CAE1B,kBAAkB,CAChB,MAAM,CAAE,iBAA6B,CAGrC,0BAAE,CACA,aAAa,CAAE,iBAA6B,CAC9C,8CAAwB,CACtB,mBAAmB,CAAE,CAAC,CAGxB,2CAAwB,CACtB,mBAAmB,CAAE,CAAC,CACxB,+CAAM,CACJ,YAAY,CAAE,SAAS,CACvB,aAAa,CAAE,iBAA6B,CAC9C,2CAAwB,CACtB,mBAAmB,CAAE,CAAC,CAG1B,oBAAoB,CAClB,aAAa,CZhHuB,IAAI,CYiHxC,SAAS,CAAE,IAAI,CACf,QAAQ,CAAE,IAAI,CACd,0BAAK,CACH,aAAa,CAAE,YAAY,CAC3B,2DAAM,CACJ,WAAW,CAAE,MAAM,CCzIzB,CAAC,CACC,KAAK,CX+B+B,OAAK,CW9BzC,eAAe,CAAE,IAAI,CACrB,MAAM,CAAE,OAAO,CACf,OAAO,CACL,KAAK,CbgD6B,OAAwB,Ca/C5D,SAAS,CACP,KAAK,CX0B6B,OAAO,CWA7C,IAAI,CACF,MAAM,CAAE,IAAI,CACZ,UAAU,CAAE,MAAM,CAEpB,IAAI,CACF,WAAW,CXOyB,uDAA2D,CWN/F,WAAW,CAAE,MAAM,CACnB,KAAK,CXlB+B,OAAW,CWmB/C,UAAU,CAAE,IAAI,CAChB,UAAU,CAAE,MAAM,CAClB,UAAU,CbnD0B,OAAO,CaqD7C,aAAa,CACX,UAAU,CAAE,IAAI,CAElB,eAAe,CACb,UAAU,CAAE,MAAM,CAEpB,cAAc,CACZ,UAAU,CAAE,KAAK,CAEnB,cAAc,CACZ,SAAS,CAAE,IAAI,CAEjB,eAAe,CACb,SAAS,CAAE,IAAI,CAEjB,oBAAqB,CACnB,SAAS,CAAE,GAAG,CAEhB,eAAe,CACb,eAAe,CAAE,YAAY,CAE/B,gBAAgB,CACd,KAAK,CAAE,
kBAAkB,CAC3B,uBAAuB,CACrB,KAAK,CAAE,kBAAgC,CACzC,aAAa,CACX,KAAK,CAAE,kBAAgB,CACzB,oBAAoB,CAClB,KAAK,CAAE,kBAA8B,CACvC,gBAAgB,CACd,KAAK,CAAE,kBAAiB,CAC1B,uBAAuB,CACrB,KAAK,CAAE,kBAA+B,CACxC,eAAe,CACb,KAAK,CAAE,kBAAe,CACxB,sBAAsB,CACpB,KAAK,CAAE,kBAA6B,CACtC,gBAAgB,CACd,KAAK,CAAE,kBAAsB,CAC/B,uBAAuB,CACrB,KAAK,CAAE,kBAAoC,CAkB7C,gEAAyB,CACvB,UAAU,CAAE,CAAC,CACb,WAAW,CAAE,GAAG,CAChB,WAAW,CX5DyB,0DAA8D,CW8DpG,CAAC,CACC,WAAW,Cb1FyB,IAAI,Ca2FxC,MAAM,CAAE,CAAC,CACT,SAAS,Cb/F2B,IAAI,CagGxC,aAAa,Cb7FuB,IAAI,Ca+F1C,EAAE,CACA,SAAS,CAAE,IAAI,CAEjB,0CAAE,CACA,SAAS,CAAE,IAAI,CAEjB,EAAE,CACA,SAAS,CAAE,IAAI,CAEjB,EAAE,CACA,SAAS,CAAE,IAAI,CAEjB,EAAE,CACA,SAAS,CAAE,IAAI,CAEjB,EAAE,CACA,SAAS,CAAE,IAAI,CAEjB,EAAE,CACA,OAAO,CAAE,KAAK,CACd,MAAM,CAAE,GAAG,CACX,MAAM,CAAE,CAAC,CACT,UAAU,CAAE,iBAA6B,CACzC,MAAM,CAAE,MAAmB,CAC3B,OAAO,CAAE,CAAC,CAEZ,sCAAI,CACF,WAAW,CAAE,MAAM,CACnB,SAAS,CAAE,IAAI,CACf,UAAU,CXrH0B,IAAM,CWsH1C,MAAM,CAAE,iBAAiC,CACzC,SAAS,CAAE,GAAG,CACd,OAAO,CAAE,KAAK,CACd,WAAW,CXnGyB,wMAAoN,CWoGxP,KAAK,Cb1H+B,OAAI,Ca2HxC,UAAU,CAAE,IAAI,CAChB,0CAAY,CACV,SAAS,CAAE,GAAG,CAmClB,wFAAmB,CACjB,UAAU,CAAE,IAAI,CAChB,WAAW,CbzKyB,IAAI,Ca0KxC,aAAa,Cb1KuB,IAAI,Ca2KxC,oGAAE,CACA,UAAU,CAAE,IAAI,CAChB,WAAW,Cb7KuB,IAAI,Ca8KtC,wJAAY,CACV,aAAa,CAAE,CAAC,CAClB,gHAAE,CACA,aAAa,CAAE,CAAC,CAClB,gHAAE,CACA,UAAU,CAAE,MAAM,CAClB,4HAAE,CACA,UAAU,CAAE,MAAM,CACtB,4HAAK,CACH,UAAU,CAAE,OAAO,CAEzB,iFAAsB,CACpB,UAAU,CAAE,OAAO,CACnB,WAAW,Cb3LyB,IAAI,Ca4LxC,aAAa,Cb5LuB,IAAI,Ca6LxC,6FAAE,CACA,UAAU,CAAE,OAAO,CACnB,WAAW,Cb/LuB,IAAI,CagMtC,iJAAY,CACV,aAAa,CAAE,CAAC,CAClB,yGAAE,CACA,aAAa,CAAE,CAAC,CAChB,qHAAE,CACA,UAAU,CAAE,IAAI,CCrOxB,kBAAkB,CAChB,MAAM,CAAE,iBAA6B,CACrC,aAAa,CAAE,IAAI,CACnB,OAAO,Cd6B6B,IAAI,Cc5BxC,WAAW,CAAE,IAAqB,CAClC,WAAW,CAAE,GAAG,CAChB,UAAU,CZiC0B,IAAM,CYhC1C,QAAQ,CAAE,QAAQ,CAClB,wBAAO,CACL,OAAO,CAAE,SAAS,CAClB,QAAQ,CAAE,QAAQ,CAClB,GAAG,CAAE,GAAG,CACR,IAAI,CAAE,GAAG,CACT,UAAU,CZiCwB,OAAO,CYhCzC,KAAK,CAAE,IAAoB,CAC3B,OAAO,CAAE,QAA2C,CACtD,2CAA0B,CACxB,MAAM,CAAE,iBAA6B,CACrC,aAAa,CdcqB,IAAI,CcZ1C,+GA
AmC,CACjC,MAAM,CAAE,iBAA6B,CACrC,OAAO,CAAE,GAAG,CACZ,UAAU,CAAE,IAAI,CAChB,UAAU,CZe0B,IAAM,CYb1C,MAAM,CAAE,YAAyB,CACjC,gLAAuB,CACrB,MAAM,CAAE,IAAI,CACZ,UAAU,CAAE,IAAI,CAChB,MAAM,CAAE,CAAC,CAEb,+BAA+B,CAC7B,KAAK,CAAE,IAAI,CACb,cAAc,CACZ,YAAY,CAAE,iBAA0C,CACxD,MAAM,CAAE,CAAC,CACT,OAAO,CAAE,SAA2C,CACpD,WAAW,CZuByB,wMAAoN,CYtBxP,SAAS,CAAE,IAAI,CACf,WAAW,CAAE,GAAG,CAChB,KAAK,CdI+B,OAAwB,CcH9D,2BAA2B,CACzB,WAAW,CAAE,GAAG,CAChB,MAAM,CAAE,CAAC,CACT,OAAO,CAAE,SAA2C,CACpD,WAAW,CZeyB,wMAAoN,CYdxP,SAAS,CAAE,IAAI,CACf,WAAW,CAAE,GAAG,CAChB,OAAO,CAAE,KAAK,CACd,QAAQ,CAAE,IAAI,CACd,KAAK,CZhB+B,OAAW,CYoBjD,YAAY,CACV,2IAAgE,CAC9D,WAAW,CAAE,QAAQ,ECzDzB,IAAI,CACF,gBAAgB,CAAE,IAAO,CACzB,MAAM,CAAE,OAAO,CACf,OAAO,CAAE,MAAM,CACf,OAAO,CAAE,KAAK,CAChB,EAAE,CACA,KAAK,CAAE,IAAO,CACd,UAAU,CAAE,MAAM,CACpB,IAAI,CACF,KAAK,CAAE,OAAO,CACd,gBAAgB,CAAE,OAAO,CAC3B,EAAE,CACA,WAAW,CAAE,IAAI,CACnB,EAAE,CACA,WAAW,CAAE,IAAI,CACnB,GAAG,CACD,KAAK,CAAE,IAAO,CACd,UAAU,CAAE,MAAM,CACpB,GAAG,CACD,KAAK,CAAE,IAAO,CACd,WAAW,CAAE,IAAI,CACnB,GAAG,CACD,KAAK,CAAE,IAAO,CACd,UAAU,CAAE,MAAM,CACpB,GAAG,CACD,KAAK,CAAE,IAAO,CACd,WAAW,CAAE,IAAI,CACjB,UAAU,CAAE,MAAM,CACpB,GAAG,CACD,KAAK,CAAE,IAAO,CACd,gBAAgB,CAAE,IAAO,CAC3B,MAAM,CACJ,KAAK,CAAE,IAAO,CACd,gBAAgB,CAAE,IAAO,CAC3B,GAAG,CACD,UAAU,CAAE,MAAM,CACpB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CACd,gBAAgB,CAAE,IAAO,CAC3B,MAAM,CACJ,KAAK,CAAE,IAAO,CACd,gBAAgB,CAAE,IAAO,CAC3B,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,WAAW,CAAE,IAAI,CACnB,GAAG,CACD,KAAK,CAAE,MAAO,CACd,WAAW,CAAE,IAAI,CACnB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,WAAW,CAAE,IAAI,CACnB,GAAG,CACD,WAAW,CAAE,IAAI,CACnB,GAAG,CACD,WAAW,CAAE,IAAI,CACnB,GAAG,CACD,WAAW,CAAE,IAAI,CACnB,GAAG,CACD,WAAW,CAAE,IAAI,CACnB,GAAG,CACD,KAAK,CAAE,IAAO,CACd,WAAW,CAAE,IAAI,CACnB,EAAE,CACA,KAAK,CAAE,IAAO,CAChB,EAAE,CACA,KAAK,CAAE,IAAO,CAChB,EAAE,CACA,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAI,CACb,GAAG,CACD,KAAK,CAAE,OAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CACd,WAAW,
CAAE,IAAI,CACnB,GAAG,CACD,KAAK,CAAE,IAAI,CACb,GAAG,CACD,KAAK,CAAE,MAAM,CACf,GAAG,CACD,KAAK,CAAE,IAAO,CACd,WAAW,CAAE,IAAI,CACnB,GAAG,CACD,KAAK,CAAE,IAAO,CACd,WAAW,CAAE,IAAI,CACnB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAI,CACb,GAAG,CACD,KAAK,CAAE,IAAI,CACb,GAAG,CACD,WAAW,CAAE,IAAI,CACnB,EAAE,CACA,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,OAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,OAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAI,CACb,GAAG,CACD,KAAK,CAAE,IAAI,CACb,GAAG,CACD,KAAK,CAAE,IAAI,CACb,GAAG,CACD,KAAK,CAAE,IAAO,CAChB,GAAG,CACD,KAAK,CAAE,IAAI,CACX,gBAAgB,CAAE,OAAO,CCjJ3B,kBAAkB,CAChB,OAAO,CAAE,YAAY,CACrB,uCAAsB,CACpB,KAAK,CAAE,KAAK,CACd,oBAAC,CACC,OAAO,CAAE,YAAY,CACrB,OAAO,CAAE,GAAG,CACZ,gCAAa,CACX,YAAY,CAAE,CAAC,CACnB,6FAAI,CACF,OAAO,CAAE,GAAG,CACZ,MAAM,CAAE,IAAI,CACZ,UAAU,CAAE,IAAI,CAChB,qHAAS,CACP,KAAK,CdqB2B,OAAW,CcpBjD,qBAAqB,CACnB,aAAa,CAAE,CAAC,CAChB,KAAK,CdqB+B,OAAW,CcpB/C,SAAS,CAAE,GAAG,CACd,OAAO,CAAE,YAAY,CbanB,oCAAsB,CaTxB,qBAAqB,CACnB,OAAO,CAAE,IAAI,CACf,uCAAuC,CACrC,OAAO,CAAE,IAAI,EAEjB,YAAY,CACV,uCAAuC,CACrC,OAAO,CAAE,IAAI,EC9BjB,SAAS,CACP,QAAQ,CAAE,KAAK,CACf,GAAG,CCAO,OAAO,CDGjB,gBAAO,CACL,eAAe,CAAE,IAAI,CAEzB,cAAc,CjC+FZ,KAAK,CAAE,CAAC,CACR,0CAAS,CAEP,OAAO,CAAE,KAAK,CACd,OAAO,CAAE,EAAE,CACb,oBAAO,CACL,KAAK,CAAE,IAAI,CiCnGb,mCAAM,CACJ,OAAO,CAAE,YAAY,CACvB,uBAAQ,CACN,UAAU,CAAE,qBAAoB,CAEhC,6BAAa,CACX,WAAW,CAAE,iBAAyB,CACxC,8BAAc,CACZ,YAAY,CAAE,iBAAyB,CAC3C,gBAAC,CACC,MAAM,CAAE,IAAmB,CAC3B,OAAO,CAAE,YAAY,CACrB,WAAW,CAAE,IAAmB,CAChC,OAAO,CAAE,MAAiB,CAE9B,iBAAiB,CACf,KAAK,CjBuD+B,KAAK,CiBtDzC,oDAAiB,CACf,MAAM,CAAE,IAAmB,CAC3B,OAAO,CAAE,YAA
Y,CACrB,WAAW,CAAE,IAAmB,CAChC,OAAO,CAAE,SAAS,CAClB,aAAa,CAAE,CAAC,CAChB,OAAO,CAAE,KAAK,CACd,WAAW,CAAE,IAAI,CACjB,cAAc,CAAE,SAAS,CACzB,SAAS,CAAE,GAAG,CACd,KAAK,CfR6B,OAAwB,CeS1D,WAAW,CAAE,MAAM,CAErB,oBAAE,CACA,aAAa,CAAE,CAAC,CAEhB,+BAAY,CACV,UAAU,CAAE,iBAAyB,CACvC,kCAAe,CACb,aAAa,CAAE,iBAAyB,CAC1C,4BAAS,CACP,UAAU,CAAE,OAA4C,CACxD,8BAAC,CACC,KAAK,CfbyB,IAAY,Cec1C,YAAY,CAAE,iBAAsD,CACpE,OAAO,CAAE,eAAyB,CAClC,oCAAO,CACL,UAAU,CAAE,OAA4C,CAC9D,mGAAI,CACF,MAAM,CAAE,IAAI,CACZ,UAAU,CAAE,OAAO,CACnB,KAAK,CAAE,OAAO,CACd,YAAY,CAAE,CAAC,CACf,aAAa,CAAE,CAAC,CAElB,wCAAmB,CACjB,OAAO,CAAE,KAAK,CACd,KAAK,CAAE,IAAI,CACX,WAAW,CAAE,MAAM,CAGnB,SAAS,CAAE,IAAI,CACf,WAAW,CAAE,KAAK,CAClB,KAAK,CAAE,OAA8B,CAGzC,wDAAuB,CACrB,KAAK,CfvC6B,OAAW,CewC7C,OAAO,CAAE,eAAmB,CAC5B,WAAW,CAAE,IAAI,CACjB,QAAQ,CAAE,QAAQ,CAClB,UAAU,CflCwB,OAAyB,CemC3D,MAAM,CAAE,IAAI,CACZ,aAAa,CAAE,iBAAsD,CACrE,UAAU,CAAE,iBAAsD,CAClE,YAAY,CAAE,YAAY,CAE1B,oEAAO,CACL,UAAU,CfzCsB,OAAyB,Ce0CzD,4GAAmB,CACjB,KAAK,CflDyB,IAAY,CemD9C,gGAAmB,CAGjB,OAAO,CAAE,KAAK,CACd,SAAS,CAAE,IAAI,CACf,WAAW,CAAE,KAAK,CAClB,KAAK,CAAE,IAA8B,CAIvC,iHAAI,CACF,OAAO,CAAE,IAAI,CACf,iIAAc,CACZ,OAAO,CAAE,KAAK,CAGd,yCAAG,CACD,UAAU,CAAE,OAA4C,CACxD,OAAO,CAAE,eAAyB,CACpC,uDAAiB,CACf,OAAO,CAAE,KAAK,CACd,UAAU,CAAE,OAA4C,CACxD,OAAO,CAAE,eAAyB,CACtC,2DAA2B,CACzB,KAAK,Cf3E2B,IAAY,Ce4E9C,mDAAmB,CACjB,KAAK,CAAE,OAA4C,CACvD,+BAAa,CACX,SAAS,CAAE,IAAI,CAEb,yCAAG,CACD,UAAU,CAAE,OAA4C,CACxD,OAAO,CAAE,eAAyB,CACpC,uDAAiB,CACf,OAAO,CAAE,KAAK,CACd,UAAU,CAAE,OAA4C,CACxD,OAAO,CAAE,eAAyB,CAClC,UAAU,CAAE,IAAI,CAChB,aAAa,CAAE,IAAI,CACvB,2DAA2B,CACzB,KAAK,Cf3F2B,IAAY,Ce4F9C,mDAAmB,CACjB,KAAK,CAAE,OAA4C,CACvD,+BAAa,CACX,SAAS,CAAE,IAAI,CAEjB,+BAAa,CACX,OAAO,CAAE,KAAK,CAChB,uBAAK,CACH,aAAa,CAAE,CAAC,CAChB,OAAO,CAAE,IAAI,CAEb,kCAAK,CACH,OAAO,CAAE,KAAK,CAClB,4BAAU,CACR,aAAa,CAAE,CAAC,CAChB,KAAK,Cf1G6B,OAAW,Ce2G7C,WAAW,CAAE,MAAM,CACrB,mBAAC,CACC,OAAO,CAAE,YAAY,CACrB,WAAW,CAAE,IAAI,CACjB,OAAO,CAAE,eAAmB,CAC5B,OAAO,CAAE,KAAK,CACd,QAAQ,CAAE,QAAQ,CAClB,SAAS,CAAE,GAAG,CACd,KAAK,CfnH6B,OAAW,CeoH7C,
yBAAO,CACL,gBAAgB,CAAE,OAAoC,CACtD,MAAM,CAAE,OAAO,CACf,6CAAmB,CACjB,KAAK,CfxHyB,OAAW,CeyH7C,0BAAQ,CACN,gBAAgB,CfnHgB,OAAK,CeoHrC,MAAM,CAAE,OAAO,CACf,KAAK,Cf3H2B,IAAM,Ce4HtC,8CAAmB,CACjB,KAAK,Cf7HyB,IAAM,Ce+H5C,mBAAmB,CACjB,OAAO,CAAE,KAAK,CACd,KAAK,CjBvF+B,KAAK,CiBwFzC,OAAO,CAAE,MAAW,CACpB,aAAa,CAAE,MAAW,CAC1B,OAAO,CjBrF6B,GAAG,CiBsFvC,gBAAgB,Cf/HoB,OAAK,CegIzC,UAAU,CAAE,MAAM,CAClB,OAAO,CAAE,MAAW,CACpB,OAAO,CAAE,KAAK,CACd,KAAK,CfpI+B,OAAyB,CeqI7D,aAAa,CAAE,MAAW,CAC1B,oCAAgB,CACd,KAAK,CAAE,IAAI,CACX,aAAa,CAAE,IAAI,CACnB,OAAO,CAAE,QAAQ,CACjB,YAAY,CAAE,OAAuB,CACvC,uBAAG,CACD,OAAO,CAAE,KAAK,CACd,MAAM,CAAE,qBAA0B,CAClC,MAAM,CAAE,IAAI,CACZ,KAAK,CAAE,IAAI,CACX,gBAAgB,Cf/IkB,OAAK,CegJvC,OAAO,CAAE,GAAG,CACZ,aAAa,CAAE,IAAI,CACrB,wDAAqB,CACnB,KAAK,CfpJ6B,OAAyB,CeqJ3D,SAAS,CAAE,IAAI,CACf,WAAW,CAAE,IAAI,CACjB,OAAO,CAAE,YAAY,CACrB,OAAO,CAAE,OAA2C,CACpD,aAAa,CAAE,MAAW,CAE1B,oEAAO,CACL,UAAU,CAAE,qBAAoB,CAClC,0EAAQ,CACN,OAAO,CAAE,KAAK,CACd,MAAM,CAAE,MAAM,CACd,MAAM,CAAE,IAAI,CACZ,KAAK,CAAE,IAAI,CACX,aAAa,CAAE,CAAC,CAChB,SAAS,CAAE,IAAI,CACf,UAAU,CAAE,WAAa,CAEzB,oFAAQ,CACN,UAAU,CAAE,KAAM,CACxB,+BAAa,CACX,UAAU,CAAE,QAAkB,CAC9B,aAAa,CAAE,MAAW,CAC1B,WAAW,CAAE,MAAM,CACnB,KAAK,CAAE,qBAAoB,CAI7B,gCAAM,CACJ,KAAK,CfhL6B,OAAK,CeiLzC,2BAAC,CACC,KAAK,CfzL6B,OAAW,Ce0L7C,iCAAO,CACL,gBAAgB,CfpLgB,OAAK,CeqLrC,KAAK,Cf3L2B,IAAM,Ce6L5C,gBAAgB,CnC3NR,kBAAoB,CAAE,eAAM,CAK5B,eAAiB,CAAE,eAAM,CAezB,UAAY,CAAE,eAAM,CmCyM1B,QAAQ,CAAE,QAAQ,CAClB,OAAO,CAAE,CAAC,CACV,KAAK,CAAE,IAAI,CACX,OAAO,CAAE,CAAC,CACV,4BAAa,CACX,IAAI,CAAE,CAAC,CACP,KAAK,CAAE,IAAI,CACX,OAAO,CAAE,CAAC,CACZ,0BAAW,CACT,KAAK,CAAE,IAAI,CACX,IAAI,CAAE,KAAK,CACX,OAAO,CAAE,CAAC,CACZ,2BAAY,CACV,KAAK,CAAE,KAAK,CACZ,IAAI,CAAE,IAAI,CACV,OAAO,CAAE,CAAC,CAGd,gBAAgB,CACd,UAAU,CAAE,qBAAuC,CACnD,gBAAgB,CAAE,2uCAA2uC,CAC7vC,eAAe,CAAE,SAAsB,CAEzC,gBAAgB,CACd,QAAQ,CAAE,QAAQ,CAClB,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,IAAI,CAEd,YAAY,CACV,QAAQ,CAAE,KAAK,CACf,GAAG,CAAE,CAAC,CACN,MAAM,CAAE,CAAC,CACT,IAAI,CAAE,CAAC,CACP,cAAc,CAAE,GAAG,CACnB,KAAK,CjBvL+B,KAAK,CiBwLzC,UAAU,CAAE,
MAAM,CAClB,UAAU,CAAE,MAAM,CAClB,UAAU,CAAE,IAAI,CAChB,UAAU,CflO0B,OAAsB,CemO1D,OAAO,CjBvL6B,GAAG,CiByLzC,eAAe,CACb,KAAK,CAAE,KAAyB,CAChC,QAAQ,CAAE,QAAQ,CAClB,UAAU,CAAE,MAAM,CAClB,UAAU,CAAE,MAAM,CAClB,MAAM,CAAE,IAAI,CAEd,WAAW,CACT,OAAO,CAAE,IAAI,CACb,UAAU,Cf3O0B,OAAK,Ce4OzC,KAAK,CflP+B,IAAM,CemP1C,OAAO,CAAE,cAAuB,CAChC,QAAQ,CAAE,QAAQ,CAClB,WAAW,CAAE,IAAI,CACjB,UAAU,CAAE,MAAM,CAClB,SAAS,CAAE,IAAI,CjCvLf,KAAK,CAAE,CAAC,CACR,oCAAS,CAEP,OAAO,CAAE,KAAK,CACd,OAAO,CAAE,EAAE,CACb,iBAAO,CACL,KAAK,CAAE,IAAI,CiCmLb,aAAC,CACC,KAAK,Cf1P6B,IAAM,Ce2PxC,WAAW,CAAE,IAAI,CAEnB,eAAG,CACD,YAAY,CAAE,IAAqB,CACnC,MAAM,CAAE,IAAI,CACZ,KAAK,CAAE,IAAI,CACX,gBAAgB,Cf3PkB,OAAK,Ce4PvC,OAAO,CAAE,GAAG,CACZ,aAAa,CAAE,IAAI,CACrB,aAAC,CACC,SAAS,CAAE,IAAI,CACf,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,OAAO,CACf,WAAW,CAAE,OAAO,CAExB,oBAAoB,CAClB,WAAW,CjBjOyB,KAAK,CiBkOzC,UAAU,CfvQ0B,OAAyB,CewQ7D,UAAU,CAAE,IAAI,CAElB,eAAe,CACb,OAAO,CAAE,eAAmB,CAC5B,MAAM,CAAE,IAAI,CACZ,SAAS,CAAE,KAAK,CAChB,MAAM,CAAE,IAAI,CAEd,aAAa,CACX,QAAQ,CAAE,KAAK,CACf,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,IAAI,CACZ,UAAU,CAAE,eAAc,CAC1B,OAAO,CAAE,IAAI,CACb,OAAO,CAAE,GAAkB,CAC3B,gBAAI,CACF,OAAO,CAAE,KAAK,CAClB,MAAM,CACJ,KAAK,CfjS+B,IAAY,CekShD,QAAC,CACC,aAAa,CAAE,IAAqB,CACtC,6FAAgB,CACd,OAAO,CAAE,GAAG,CACZ,WAAW,Cf9QuB,wMAAoN,Ce+QtP,SAAS,CAAE,GAAG,CACd,UAAU,CAAE,IAAI,CAChB,MAAM,CAAE,IAAI,CACZ,KAAK,Cf1S6B,IAAY,Ce4SlD,mBAAmB,CjC1OjB,KAAK,CAAE,CAAC,CiC2OR,oDAAiB,CACf,KAAK,CAAE,IAAI,CjC3Ob,oDAAS,CAEP,OAAO,CAAE,KAAK,CACd,OAAO,CAAE,EAAE,CACb,yBAAO,CACL,KAAK,CAAE,IAAI,CiCyOf,wBAAwB,CACtB,UAAU,CAAE,IAAI,CjChPhB,KAAK,CAAE,CAAC,CACR,8DAAS,CAEP,OAAO,CAAE,KAAK,CACd,OAAO,CAAE,EAAE,CACb,8BAAO,CACL,KAAK,CAAE,IAAI,CiC8Ob,0BAAU,CACR,aAAa,CjB5TqB,IAAI,CiB6TtC,aAAa,CAAE,iBAA6B,CAC5C,cAAc,CjB9ToB,IAAI,CiB+TxC,sCAAsB,CACpB,UAAU,CAAE,iBAA6B,CACzC,WAAW,CjBjUuB,IAAI,CiBkUxC,4BAAY,CACV,SAAS,CAAE,IAAI,CACf,aAAa,CAAE,IAAqB,CACpC,OAAO,CAAE,YAAY,CACvB,wBAAQ,CACN,KAAK,CflU6B,IAAY,CemU9C,SAAS,CAAE,GAAG,CdxUd,oCAAsB,Cc4UxB,gBAAgB,CACd,UAAU,CfjUwB,OAAyB,CekU7D,WAAW,CACT,OAAO,CAAE,KAAK,CAChB,
YAAY,CAER,IAAI,CAAE,MAAmB,CAG3B,kBAAO,CACL,KAAK,CAAE,GAAG,CACV,IAAI,CAAE,CAAC,CACX,eAAe,CACb,KAAK,CAAE,IAAI,CACb,mBAAmB,CACjB,KAAK,CAAE,IAAI,CACb,yBAAyB,CACvB,KAAK,CAAE,IAAI,CACb,oBAAoB,CAClB,WAAW,CAAE,CAAC,CACd,oCAAe,CACb,OAAO,CC/XD,OAAO,CDgYf,0BAAO,CACL,QAAQ,CAAE,KAAK,CACf,SAAS,CAAE,IAAI,CACf,IAAI,CAAE,GAAG,CACT,GAAG,CAAE,CAAC,CACN,MAAM,CAAE,IAAI,CACZ,QAAQ,CAAE,MAAM,EdxWlB,qCAAsB,Cc2WxB,oBAAoB,CAClB,UAAU,CAAE,gBAAe,CAC7B,eAAe,CACb,MAAM,CAAE,CAAC,CACT,UAAU,CfnWwB,OAAyB,EeqW/D,YAAY,CACV,iCAAmC,CACjC,OAAO,CAAE,IAAI,CACf,oBAAoB,CAClB,WAAW,CAAE,CAAC,EErZlB,aAAa,CACX,QAAQ,CAAE,KAAK,CACf,MAAM,CAAE,CAAC,CACT,IAAI,CAAE,CAAC,CACP,KAAK,CnB6E+B,KAAK,CmB5EzC,KAAK,CjBuC+B,OAAyB,CiBtC7D,UAAU,CAAE,OAAkC,CAC9C,UAAU,CAAE,kBAAiC,CAC7C,WAAW,CjBkDyB,uDAA2D,CiBjD/F,OAAO,CnB+E6B,GAAG,CmB9EvC,eAAC,CACC,KAAK,CjBkC6B,OAAK,CiBjCvC,eAAe,CAAE,IAAI,CACvB,8BAAgB,CACd,OAAO,CAAE,IAAI,CACf,kCAAoB,CAClB,OAAO,CAAE,IAAqB,CAC9B,gBAAgB,CAAE,OAAkC,CACpD,OAAO,CAAE,KAAK,CACd,UAAU,CAAE,KAAK,CACjB,SAAS,CAAE,GAAG,CACd,MAAM,CAAE,OAAO,CACf,KAAK,CjBX6B,OAAM,ClB4F1C,KAAK,CAAE,CAAC,CACR,kFAAS,CAEP,OAAO,CAAE,KAAK,CACd,OAAO,CAAE,EAAE,CACb,wCAAO,CACL,KAAK,CAAE,IAAI,CmCrFX,uqDAAG,CACD,KAAK,CjBmB2B,OAAyB,CiBlB3D,yFAAQ,CACN,KAAK,CAAE,IAAI,CACb,6CAAU,CACR,KAAK,CAAE,IAAI,CACb,kDAAiB,CACf,gBAAgB,CnBQgB,OAAI,CmBPpC,KAAK,CjBO2B,IAAM,CiBNxC,yDAAwB,CACtB,gBAAgB,CjBsBgB,OAAO,CiBrBvC,KAAK,CnBzB2B,IAAI,CmB0BxC,0CAA8B,CAC5B,OAAO,CAAE,KAAK,CAChB,iCAAmB,CACjB,SAAS,CAAE,GAAG,CACd,OAAO,CAAE,IAAqB,CAC9B,KAAK,CjBJ6B,IAAY,CiBK9C,OAAO,CAAE,IAAI,CACb,oCAAE,CACA,OAAO,CAAE,KAAK,CACd,MAAM,CAAE,GAAG,CACX,MAAM,CAAE,CAAC,CACT,MAAM,CAAE,MAAM,CACd,OAAO,CAAE,CAAC,CACV,UAAU,CAAE,iBAA6C,CAC3D,oCAAE,CACA,OAAO,CAAE,YAAY,CACrB,MAAM,CAAE,CAAC,CACT,sCAAC,CACC,OAAO,CAAE,YAAY,CACrB,OAAO,CAAE,GAAqB,CAC9B,KAAK,CjBZyB,OAAyB,CiBa7D,uBAAW,CACT,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,IAAI,CACZ,KAAK,CAAE,IAAI,CACX,IAAI,CAAE,IAAI,CACV,MAAM,CAAE,IAAI,CACZ,SAAS,CnBkByB,KAAK,CmBjBvC,kCAAU,CACR,KAAK,CAAE,IAAI,CACb,mEAAQ,CACN,KAAK,CAAE,IAAI,CACb,qDAA+B,CAC7B,UAAU,CAAE,KAAK,CACjB
,+HAAQ,CACN,KAAK,CAAE,IAAI,CACb,gEAAU,CACR,KAAK,CAAE,IAAI,CACf,4CAAoB,CAClB,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,IAAI,CACZ,WAAW,CAAE,IAAI,CACjB,OAAO,CAAE,KAAuB,CAChC,OAAO,CAAE,KAAK,CACd,UAAU,CAAE,MAAM,ChBhDpB,oCAAsB,CgBmDxB,aAAa,CACX,KAAK,CAAE,GAAG,CACV,OAAO,CAAE,IAAI,CACb,mBAAO,CACL,OAAO,CAAE,KAAK,ECtElB,gBAAG,CACD,SAAS,CAAE,IAAI,CACf,MAAM,CAAE,eAAe,CAEzB,uDAAkC,CAChC,WAAW,CAAE,MAAM,CAErB,uBAAU,CACR,aAAa,CpBOqB,IAAI,CoBNtC,iCAAS,CACP,UAAU,CAAE,MAAM,CAEtB,oCAAuB,CACrB,UAAU,CAAE,MAAM,CAGpB,qDAAoC,CAClC,aAAa,CpBFqB,IAAI,CoBaxC,uBAAU,CACR,WAAW,CpBduB,IAAI,CoBetC,WAAW,CpBfuB,IAAI,CoBgBtC,aAAa,CpBhBqB,IAAI,CoBsBtC,kTAAK,CACH,aAAa,CAAE,CAAC,CAKlB,qCAAQ,CACN,YAAY,CAAE,GAAG,CAUrB,8BAAiB,CACf,YAAY,CAAE,eAAc,CAC5B,mEAAM,CACJ,UAAU,CAAE,sBAAsB,CAClC,YAAY,CAAE,0BAAyB,CAG3C,0EAAiD,CAC/C,UAAU,CAAE,WAAW,CACzB,0EAAiD,CAC/C,UAAU,CAAE,WAAW,CAGzB,qDAA4B,CAC1B,aAAa,CAAE,IAAqB,CACtC,wBAAW,CACT,WAAW,CpBvDuB,IAAI,CoB0DxC,yBAAY,CACV,WAAW,CAAE,IAAI,CACjB,aAAa,CAAE,IAAqB,CACtC,yBAAY,CACV,KAAK,ClB3D6B,OAAW,CkB4D/C,yBAAY,CACV,KAAK,CAAE,KAAK,CACZ,MAAM,CAAE,iBAA2C,CACrD,wBAAW,CACT,KAAK,CAAE,IAAI,CACX,MAAM,CAAE,iBAA2C,CACrD,0BAAa,CACX,MAAM,CAAE,IAAI,CACZ,OAAO,CAAE,KAAK,CAMd,6RAAW,CACT,OAAO,CAAE,IAAI,CACb,UAAU,CAAE,MAAM,CAClB,SAAS,CAAE,IAAI,CAEf,mVAAO,CACL,UAAU,CAAE,OAAO,CACnB,OAAO,CAAE,GAAO,CAChB,WAAW,CAAE,WAAW,CACxB,OAAO,CAAE,YAAY,CACzB,mVAAmB,CACjB,OAAO,CAAE,YAAY,CAEzB,sBAAS,CACP,UAAU,CAAE,MAAM,CAGpB,qBAAQ,CACN,KAAK,CAAE,KAAK,CACZ,KAAK,CAAE,GAAG,CACV,OAAO,CAAE,KAAK,CACd,MAAM,CAAE,aAAuC,CAC/C,OAAO,CpBnG2B,IAAI,CoBoGtC,UAAU,ClBjFwB,OAAmB,CkBkFrD,MAAM,CAAE,iBAA+B,CAEvC,yEAAS,CACP,SAAS,CAAE,GAAG,CAChB,2BAAK,CACH,aAAa,CAAE,CAAC,CAClB,oCAAc,CACZ,OAAO,CAAE,KAAK,CACd,WAAW,ClBlFqB,0DAA8D,CkBmF9F,WAAW,CAAE,IAAI,CACjB,UAAU,ClB1FsB,OAAmB,CkB2FnD,OAAO,CAAE,QAA2C,CACpD,MAAM,CAAE,KAAkB,CAC1B,aAAa,CpBlHmB,IAAI,CoBmHpC,SAAS,CAAE,IAAI,CAEnB,yBAAY,CACV,UAAU,ClB9FwB,OAAO,CkB+FzC,OAAO,CAAE,YAAY,CACrB,WAAW,CAAE,IAAI,CACjB,OAAO,CAAE,KAAuB,CAGlC,iEAAwC,CACtC,cAAc,CAAE,KAAK,CACrB,SAAS,CAAE,GAAG,CAIhB,yEAAgD,CAC9C,UAAU,CAAE,IAA
I,CAChB,MAAM,CAAE,IAAI,CACZ,KAAK,ClBhI6B,IAAY,CkBiI9C,+JAAM,CACJ,MAAM,CAAE,IAAI,CACZ,gBAAgB,CAAE,sBAAsB,CACxC,WAAW,CAAE,MAAM,CACrB,2FAAQ,CACN,YAAY,CAAE,CAAC,CACf,aAAa,CAAE,CAAC,CAChB,cAAc,CAAE,GAAG,CACrB,mKAAI,CACF,KAAK,ClBnJ2B,IAAK,CkB0JzC,6BAAgB,CAEd,MAAM,CAAE,IAAI,CACZ,gCAAE,CACA,MAAM,CAAE,IAAI,CACZ,WAAW,CAAE,GAAG,CAClB,uCAAW,CACT,OAAO,CAAE,YAAY,CACrB,UAAU,CAAE,GAAG,CACjB,yCAAW,CACT,aAAa,CAAE,IAAI,CACnB,UAAU,CAAE,IAAI,CAChB,WAAW,CAAE,MAAM,CACrB,yCAAW,CACT,UAAU,CAAE,IAAI,CAChB,YAAY,CAAE,CAAC,CAGnB,iDAAQ,CAEN,KAAK,CpBhM6B,IAAI,CoBiMtC,OAAO,CAAE,OAAO,CAChB,wHAAO,CACL,SAAS,CAAE,eAAe,CAC1B,WAAW,CAAE,MAAM,CAErB,yEAAS,CACP,KAAK,CpB1K2B,OAAI,CoB2KtC,wHAAW,CACT,WAAW,CAAE,IAAI,CACjB,KAAK,ClBjL2B,OAAW,CkBmL/C,uDAAY,CACV,KAAK,ClB1K6B,OAAK,CkB2KzC,eAAE,CACA,aAAa,CpBzLqB,IAAI,CoB0LtC,kBAAE,CACA,WAAW,CAAE,IAAI,CAEnB,6EAAgB,CACd,aAAa,CAAE,eAAgC,CAEjD,kBAAE,CACA,MAAM,CAAE,aAA4C,CAMxD,8BAAiB,CACf,aAAa,CpBxMqB,IAAI,CoB0MtC,iCAAE,CACA,OAAO,CAAE,KAAK,CACd,MAAM,CAAE,KAAuB,CAC/B,SAAS,CAAE,GAAG,CACd,WAAW,CAAE,MAAM,CACnB,UAAU,CAAE,OAA0B,CACtC,KAAK,ClBnM2B,OAAK,CkBoMrC,UAAU,CAAE,iBAAoC,CAChD,OAAO,CAAE,GAAqB,CAC9B,QAAQ,CAAE,QAAQ,CAClB,wCAAQ,CACN,KAAK,CAAE,OAA0B,CACnC,6CAAW,CACT,KAAK,ClBpNyB,OAAW,CkBqNzC,SAAS,CAAE,eAAe,CAE9B,oCAAK,CACH,aAAa,CAAE,GAAqB,CACpC,MAAM,CAAE,IAAI,CACZ,WAAW,CAAE,cAAuB,CACpC,UAAU,CAAE,OAAa,CACzB,KAAK,ClBnO2B,IAAK,CkBoOrC,gDAAW,CACT,KAAK,ClB9NyB,OAAW,CkB+NzC,SAAS,CAAE,eAAe,CAC9B,6CAAc,CACZ,UAAU,CAAE,CAAC,CAEf,uGAAQ,CACN,WAAW,CAAE,IAAI,CACjB,oRAA2B,CACzB,gBAAgB,CAAE,WAAW,CAC7B,MAAM,CAAE,IAAI,CACZ,OAAO,CAAE,CAAC,CACV,SAAS,CAAE,eAAe,CAC5B,kIAAU,CACR,WAAW,CAAE,IAAI,CAErB,wCAAS,CACP,OAAO,CAAE,YAAY,CACrB,OAAO,CAAE,KAAK,CACd,KAAK,CpBzQ2B,IAAI,CoB0QpC,WAAW,CAAE,IAAI,CACnB,wCAAS,CACP,OAAO,CAAE,YAAY,CACrB,aAAa,CAAE,GAAG,CAEtB,uDAA8B,CAC5B,OAAO,CAAE,YAAY,CACrB,KAAK,ClBhR6B,OAAM,CkBiRxC,SAAS,CAAE,GAAG,CACd,YAAY,CpB7PsB,IAAI,CoB8PxC,2BAAc,CACZ,OAAO,CAAE,KAAK,CACd,KAAK,CAAE,KAAK,CACd,qBAAQ,CACN,aAAa,CAAE,IAAI,CACnB,WAAW,CAAE,IAAI,CAEnB,mDAAa,CACX,UAAU,CAAE,OAAO,CACnB,OAAO,CAAE,OAAO
,CAChB,WAAW,CAAE,MAAM,CACnB,WAAW,CAAE,OAAO,CACpB,SAAS,CAAE,OAAO,CAClB,KAAK,CAAE,OAAO,CACd,MAAM,CAAE,OAAO,CACf,WAAW,CAAE,OAAO,CACpB,qFAAgB,CACd,sBAAsB,CAAE,oBAAoB,CAG5C,mGAAQ,CACN,YAAY,CAAE,GAAG,CACvB,sBAAS,CACP,MAAM,CAAE,iBAAuC,CAC/C,UAAU,CAAE,OAA6B,CACzC,SAAS,CAAE,GAAG,CACd,WAAW,CAAE,GAAG,CAChB,aAAa,CAAE,GAAqB,CACpC,OAAO,CAAE,SAA4C,CACrD,MAAM,CAAE,QAA2B,CACrC,6BAAgB,CACd,UAAU,CAAE,MAAM,CjB7RlB,oCAAsB,CiBmStB,qBAAQ,CACN,KAAK,CAAE,IAAI,ECpUjB,wBAAwB,CACtB,KAAK,CnBkC+B,OAAW,CmBhCjD,KAAK,CACH,UAAU,CAAE,MAAM,YCHlB,WAAW,CAAE,aAAa,CAC1B,UAAU,CAAE,MAAM,CAClB,WAAW,CAAE,GAAG,CAChB,GAAG,CAAE,0GAA4G,YAGjH,WAAW,CAAE,aAAa,CAC1B,UAAU,CAAE,MAAM,CAClB,WAAW,CAAE,GAAG,CAChB,GAAG,CAAE,yGAA2G,YAGhH,WAAW,CAAE,MAAM,CACnB,UAAU,CAAE,MAAM,CAClB,WAAW,CAAE,GAAG,CAChB,GAAG,CAAE,6FAA+F,YAGpG,WAAW,CAAE,MAAM,CACnB,UAAU,CAAE,MAAM,CAClB,WAAW,CAAE,GAAG,CAChB,GAAG,CAAE,oFAAsF,YAG3F,WAAW,CAAE,aAAa,CAC1B,UAAU,CAAE,MAAM,CAClB,WAAW,CAAE,GAAG,CAChB,GAAG,CAAE,gHAAkH,YAGvH,WAAW,CAAE,aAAa,CAC1B,UAAU,CAAE,MAAM,CAClB,WAAW,CAAE,GAAG,CAChB,GAAG,CAAE,uGAAyG", -"sources": 
["../../../bower_components/neat/app/assets/stylesheets/grid/_grid.scss","../../../bower_components/bourbon/dist/addons/_prefixer.scss","../../../bower_components/wyrm/sass/wyrm_core/_reset.sass","../../../bower_components/wyrm/sass/wyrm_core/_mixin.sass","../../../bower_components/font-awesome/scss/font-awesome.scss","../../../bower_components/font-awesome/scss/_path.scss","../../../bower_components/font-awesome/scss/_core.scss","../../../bower_components/font-awesome/scss/_larger.scss","../../../bower_components/font-awesome/scss/_fixed-width.scss","../../../bower_components/font-awesome/scss/_list.scss","../../../bower_components/font-awesome/scss/_variables.scss","../../../bower_components/font-awesome/scss/_bordered-pulled.scss","../../../bower_components/font-awesome/scss/_animated.scss","../../../bower_components/font-awesome/scss/_rotated-flipped.scss","../../../bower_components/font-awesome/scss/_mixins.scss","../../../bower_components/font-awesome/scss/_stacked.scss","../../../bower_components/font-awesome/scss/_icons.scss","../../../bower_components/font-awesome/scss/_screen-reader.scss","../../../bower_components/wyrm/sass/wyrm_core/_font_icon_defaults.sass","../../../bower_components/wyrm/sass/wyrm_core/_wy_variables.sass","../../../bower_components/wyrm/sass/wyrm_core/_alert.sass","../../../sass/_theme_variables.sass","../../../bower_components/neat/app/assets/stylesheets/grid/_media.scss","../../../bower_components/wyrm/sass/wyrm_core/_button.sass","../../../bower_components/wyrm/sass/wyrm_core/_dropdown.sass","../../../bower_components/wyrm/sass/wyrm_core/_form.sass","../../../bower_components/neat/app/assets/stylesheets/grid/_outer-container.scss","../../../bower_components/neat/app/assets/stylesheets/settings/_grid.scss","../../../bower_components/neat/app/assets/stylesheets/grid/_span-columns.scss","../../../bower_components/wyrm/sass/wyrm_core/_neat_extra.sass","../../../bower_components/wyrm/sass/wyrm_core/_generic.sass","../../../bower_componen
ts/wyrm/sass/wyrm_core/_table.sass","../../../bower_components/wyrm/sass/wyrm_core/_type.sass","../../../bower_components/wyrm/sass/wyrm_addons/pygments/_pygments.sass","../../../bower_components/wyrm/sass/wyrm_addons/pygments/_pygments_light.sass","../../../sass/_theme_breadcrumbs.sass","../../../sass/_theme_layout.sass","../../../bower_components/neat/app/assets/stylesheets/grid/_private.scss","../../../sass/_theme_badge.sass","../../../sass/_theme_rst.sass","../../../sass/_theme_mathjax.sass","../../../sass/_theme_font_local.sass"], -"names": [], -"file": "theme.css" -} diff --git a/docs/_static/doctools.js b/docs/_static/doctools.js deleted file mode 100644 index 816349563588..000000000000 --- a/docs/_static/doctools.js +++ /dev/null @@ -1,287 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. 
- */ -jQuery.getQueryParameters = function(s) { - if (typeof s == 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. - */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node) { - if (node.nodeType == 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { - var span = document.createElement("span"); - span.className = className; - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this); - }); - } - } - return this.each(function() { - highlight(this); - }); -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? 
rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated == 'undefined') - return string; - return (typeof translated == 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated == 'undefined') - return (n == 1) ? singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). 
- appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) == 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this == '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, 
path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keyup(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); \ No newline at end of file diff --git a/docs/_static/down-pressed.png b/docs/_static/down-pressed.png deleted file mode 100644 index 5756c8cad885..000000000000 Binary files a/docs/_static/down-pressed.png and /dev/null differ diff --git a/docs/_static/down.png b/docs/_static/down.png deleted file mode 100644 index 1b3bdad2ceff..000000000000 Binary files a/docs/_static/down.png and /dev/null differ diff --git a/docs/_static/file.png b/docs/_static/file.png deleted file mode 100644 index a858a410e4fa..000000000000 Binary files a/docs/_static/file.png and /dev/null differ diff --git a/docs/_static/fonts/FontAwesome.otf b/docs/_static/fonts/FontAwesome.otf deleted file mode 100644 index d4de13e832d5..000000000000 Binary files a/docs/_static/fonts/FontAwesome.otf and /dev/null differ diff --git a/docs/_static/fonts/Inconsolata-Bold.ttf b/docs/_static/fonts/Inconsolata-Bold.ttf deleted file mode 100644 index 809c1f5828f8..000000000000 Binary files a/docs/_static/fonts/Inconsolata-Bold.ttf and /dev/null differ diff --git a/docs/_static/fonts/Inconsolata-Regular.ttf b/docs/_static/fonts/Inconsolata-Regular.ttf deleted file mode 100644 index fc981ce7ad6c..000000000000 Binary files 
a/docs/_static/fonts/Inconsolata-Regular.ttf and /dev/null differ diff --git a/docs/_static/fonts/Lato-Bold.ttf b/docs/_static/fonts/Lato-Bold.ttf deleted file mode 100644 index 1d23c7066e09..000000000000 Binary files a/docs/_static/fonts/Lato-Bold.ttf and /dev/null differ diff --git a/docs/_static/fonts/Lato-Regular.ttf b/docs/_static/fonts/Lato-Regular.ttf deleted file mode 100644 index 0f3d0f837d24..000000000000 Binary files a/docs/_static/fonts/Lato-Regular.ttf and /dev/null differ diff --git a/docs/_static/fonts/RobotoSlab-Bold.ttf b/docs/_static/fonts/RobotoSlab-Bold.ttf deleted file mode 100644 index df5d1df27304..000000000000 Binary files a/docs/_static/fonts/RobotoSlab-Bold.ttf and /dev/null differ diff --git a/docs/_static/fonts/RobotoSlab-Regular.ttf b/docs/_static/fonts/RobotoSlab-Regular.ttf deleted file mode 100644 index eb52a7907362..000000000000 Binary files a/docs/_static/fonts/RobotoSlab-Regular.ttf and /dev/null differ diff --git a/docs/_static/fonts/fontawesome-webfont.eot b/docs/_static/fonts/fontawesome-webfont.eot deleted file mode 100644 index c7b00d2ba889..000000000000 Binary files a/docs/_static/fonts/fontawesome-webfont.eot and /dev/null differ diff --git a/docs/_static/fonts/fontawesome-webfont.svg b/docs/_static/fonts/fontawesome-webfont.svg deleted file mode 100644 index 8b66187fe067..000000000000 --- a/docs/_static/fonts/fontawesome-webfont.svg +++ /dev/null @@ -1,685 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/_static/fonts/fontawesome-webfont.ttf b/docs/_static/fonts/fontawesome-webfont.ttf deleted file mode 100644 index f221e50a2ef6..000000000000 Binary files a/docs/_static/fonts/fontawesome-webfont.ttf and /dev/null differ diff --git a/docs/_static/fonts/fontawesome-webfont.woff b/docs/_static/fonts/fontawesome-webfont.woff deleted file mode 100644 index 6e7483cf61b4..000000000000 Binary files a/docs/_static/fonts/fontawesome-webfont.woff and /dev/null differ diff --git a/docs/_static/fonts/fontawesome-webfont.woff2 b/docs/_static/fonts/fontawesome-webfont.woff2 deleted file mode 100644 index 7eb74fd127ee..000000000000 Binary files a/docs/_static/fonts/fontawesome-webfont.woff2 and /dev/null differ diff --git a/docs/_static/img/dynamic_graph.gif b/docs/_static/img/dynamic_graph.gif deleted file mode 100644 index b4f17374e034..000000000000 Binary files a/docs/_static/img/dynamic_graph.gif and /dev/null differ diff --git a/docs/_static/img/pytorch-logo-dark.png b/docs/_static/img/pytorch-logo-dark.png deleted file mode 100644 index 0288a564e227..000000000000 Binary files a/docs/_static/img/pytorch-logo-dark.png and /dev/null differ diff 
--git a/docs/_static/img/pytorch-logo-dark.svg b/docs/_static/img/pytorch-logo-dark.svg deleted file mode 100644 index 717a3ce942f8..000000000000 --- a/docs/_static/img/pytorch-logo-dark.svg +++ /dev/null @@ -1,24 +0,0 @@ - - - - - - - - - - - - - diff --git a/docs/_static/img/tensor_illustration.png b/docs/_static/img/tensor_illustration.png deleted file mode 100644 index b0039c7f3f3e..000000000000 Binary files a/docs/_static/img/tensor_illustration.png and /dev/null differ diff --git a/docs/_static/jquery-3.1.0.js b/docs/_static/jquery-3.1.0.js deleted file mode 100644 index f2fc2747874e..000000000000 --- a/docs/_static/jquery-3.1.0.js +++ /dev/null @@ -1,10074 +0,0 @@ -/*eslint-disable no-unused-vars*/ -/*! - * jQuery JavaScript Library v3.1.0 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2016-07-07T21:44Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? 
window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - - - - function DOMEval( code, doc ) { - doc = doc || document; - - var script = doc.createElement( "script" ); - - script.text = code; - doc.head.appendChild( script ).parentNode.removeChild( script ); - } -/* global Symbol */ -// Defining this global in .eslintrc would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.1.0", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: 
jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - return num != null ? - - // Return just the one element from the set - ( num < 0 ? this[ num + this.length ] : this[ num ] ) : - - // Return all the elements in a clean array - slice.call( this ); - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. 
- push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = jQuery.isArray( copy ) ) ) ) { - - if ( copyIsArray ) { - copyIsArray = false; - clone = src && jQuery.isArray( src ) ? src : []; - - } else { - clone = src && jQuery.isPlainObject( src ) ? 
src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isFunction: function( obj ) { - return jQuery.type( obj ) === "function"; - }, - - isArray: Array.isArray, - - isWindow: function( obj ) { - return obj != null && obj === obj.window; - }, - - isNumeric: function( obj ) { - - // As of jQuery 3.0, isNumeric is limited to - // strings and numbers (primitives or objects) - // that can be coerced to finite numbers (gh-2662) - var type = jQuery.type( obj ); - return ( type === "number" || type === "string" ) && - - // parseFloat NaNs numeric-cast false positives ("") - // ...but misinterprets leading-number strings, particularly hex literals ("0x...") - // subtraction forces infinities to NaN - !isNaN( obj - parseFloat( obj ) ); - }, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - - /* eslint-disable no-unused-vars */ - // See 
https://github.com/eslint/eslint/issues/6125 - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - type: function( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; - }, - - // Evaluates a script in a global context - globalEval: function( code ) { - DOMEval( code ); - }, - - // Convert dashed to camelCase; used by the css and data modules - // Support: IE <=9 - 11, Edge 12 - 13 - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - nodeName: function( elem, name ) { - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? 
-1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var tmp, args, proxy; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. 
- if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - now: Date.now, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = jQuery.type( obj ); - - if ( type === "function" || jQuery.isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.0 - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2016-01-04 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" 
+ identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - - rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // 
Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? - // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\x80-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - disabledAncestor = addCombinator( - function( elem ) { - return elem.disabled === true; - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) 
{ - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id 
=== m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !compilerCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { - - if ( nodeType !== 1 ) { - newContext = context; - newSelector = selector; - - // qSA looks outside Element context, which is not what we want - // Thanks to Andrew Dupont for this workaround technique - // Support: IE <=8 - // Exclude object elements - } else if ( context.nodeName.toLowerCase() !== "object" ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - if ( newSelector ) { - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the 
cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement("fieldset"); - - try { - return !!fn( el ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - // Known :disabled false positives: - // IE: *[disabled]:not(button, input, select, textarea, optgroup, option, menuitem, fieldset) - // not IE: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Check form elements and option elements for explicit disabling - return "label" in elem && elem.disabled === disabled || - "form" in elem && elem.disabled === disabled || - - // Check non-disabled form elements for fieldset[disabled] ancestors - "form" in elem && elem.disabled === false && ( - // Support: IE6-11+ - // Ancestry is covered for us - elem.isDisabled === disabled || - - // Otherwise, assume any non-