diff --git a/.github/ISSUE_TEMPLATE/documentation-content.md b/.github/ISSUE_TEMPLATE/documentation-content.md deleted file mode 100644 index bf6a1e5b3f0e..000000000000 --- a/.github/ISSUE_TEMPLATE/documentation-content.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -name: "\U0001F4DADocumentation Content" -about: Report an issue related to the documentation content on https://pytorch.org - ---- - -## 📚 Documentation - -(Add a clear and concise description of what the documentation content issue is. A link to any relevant https://pytorch.org page is helpful if you have one.) diff --git a/.github/ISSUE_TEMPLATE/website-issue.md b/.github/ISSUE_TEMPLATE/website-issue.md deleted file mode 100644 index efc8fef24039..000000000000 --- a/.github/ISSUE_TEMPLATE/website-issue.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -name: "\U0001F54B Website Issue" -about: Report an issue with the https://pytorch.org website itself - ---- - -## 🕋 Website - - - -## To Reproduce - -Steps to reproduce the behavior (if applicable): - -1. Go to '...' -2. Click on '....' -3. Scroll down to '....' -4. See error - -## Expected behavior - - - -## Screenshots - - - -## Desktop (please complete the following information): - - - OS: [e.g. iOS] - - Browser [e.g. chrome, safari] - - Version [e.g. 22] - -## Additional context - - diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml deleted file mode 100644 index 23a8f5ffbad9..000000000000 --- a/.github/workflows/build.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Build - -on: - push: - branches: - - site - workflow_dispatch: - -jobs: - tests: - uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main - secrets: inherit - with: - runner: linux.12xlarge - repository: pytorch/pytorch.github.io - docker-image: cimg/ruby:2.7-node - secrets-env: PYTORCHBOT_TOKEN - script: | - git config --global --add safe.directory /__w/pytorch.github.io/pytorch.github.io - set -euxo pipefail - - ## Bundle Install - cd - mkdir .bundle - bundle config path '~/vendor/bundle' - git clone https://github.com/pytorch/pytorch.github.io.git - cd pytorch.github.io - bundle install - - ## Yarn Install - yarn install --cache-folder ~/.cache/yarn - - ## Notedown Install - sudo apt update && sudo apt install python3-pip && sudo -H pip3 install pyrsistent==0.16 notedown pyyaml -Iv nbformat==5.7 - - ## Configure Bot - git config --global user.email "facebook-circleci-bot@users.noreply.github.com" - git config --global user.name "Website Deployment Script" - - ## Build Jekyll site and push to master - ./scripts/deploy-site.sh build diff --git a/.github/workflows/update-quick-start-module.yml b/.github/workflows/update-quick-start-module.yml deleted file mode 100644 index bf6956011c9f..000000000000 --- a/.github/workflows/update-quick-start-module.yml +++ /dev/null @@ -1,118 +0,0 @@ -name: Update quick start module -on: - schedule: - # At 18:30 pm UTC (1:30 pm EST) - - cron: "30 18 * * *" - pull_request: - paths: - - .github/workflows/update-quick-start-module.yml - - scripts/gen_quick_start_module.py - - _includes/quick-start-module.js - - _includes/quick_start_local.html - push: - branches: - site - paths: - - .github/workflows/update-quick-start-module.yml - - scripts/gen_quick_start_module.py - - _includes/quick-start-module.js - - _includes/quick_start_local.html - workflow_dispatch: - -jobs: - linux-nightly-matrix: - uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main - with: - package-type: all - os: linux - channel: "nightly" - getting-started: true - 
windows-nightly-matrix: - uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main - with: - package-type: all - os: windows - channel: "nightly" - getting-started: true - macos-arm64-nightly-matrix: - uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main - with: - package-type: all - os: macos-arm64 - channel: "nightly" - getting-started: true - linux-release-matrix: - needs: [linux-nightly-matrix] - uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main - with: - package-type: all - os: linux - channel: "release" - getting-started: true - windows-release-matrix: - needs: [windows-nightly-matrix] - uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main - with: - package-type: all - os: windows - channel: "release" - getting-started: true - macos-arm64-release-matrix: - needs: [macos-arm64-nightly-matrix] - uses: pytorch/test-infra/.github/workflows/generate_binary_build_matrix.yml@main - with: - package-type: all - os: macos-arm64 - channel: "release" - getting-started: true - - update-quick-start: - needs: [linux-nightly-matrix, windows-nightly-matrix, macos-arm64-nightly-matrix, - linux-release-matrix, windows-release-matrix, macos-arm64-release-matrix] - runs-on: "ubuntu-latest" - environment: pytorchbot-env - steps: - - name: Checkout pytorch.github.io - uses: actions/checkout@v2 - - name: Setup Python - uses: actions/setup-python@v2 - with: - python-version: 3.9 - architecture: x64 - - name: Create json file - shell: bash - env: - LINUX_NIGHTLY_MATRIX: ${{ needs.linux-nightly-matrix.outputs.matrix }} - WINDOWS_NIGHTLY_MATRIX: ${{ needs.windows-nightly-matrix.outputs.matrix }} - MACOS_NIGHTLY_MATRIX: ${{ needs.macos-arm64-nightly-matrix.outputs.matrix }} - LINUX_RELEASE_MATRIX: ${{ needs.linux-release-matrix.outputs.matrix }} - WINDOWS_RELEASE_MATRIX: ${{ needs.windows-release-matrix.outputs.matrix }} - MACOS_RELEASE_MATRIX: ${{ needs.macos-arm64-release-matrix.outputs.matrix }} - run: | - set -ex - printf '%s\n' "$LINUX_NIGHTLY_MATRIX" > linux_nightly_matrix.json - printf '%s\n' "$WINDOWS_NIGHTLY_MATRIX" > windows_nightly_matrix.json - printf '%s\n' "$MACOS_NIGHTLY_MATRIX" > macos_nightly_matrix.json - printf '%s\n' "$LINUX_RELEASE_MATRIX" > linux_release_matrix.json - printf '%s\n' "$WINDOWS_RELEASE_MATRIX" > windows_release_matrix.json - printf '%s\n' "$MACOS_RELEASE_MATRIX" > macos_release_matrix.json - python3 ./scripts/gen_quick_start_module.py --autogenerate > assets/quick-start-module.js - rm *_matrix.json - - name: Create Issue if failed - uses: dacbd/create-issue-action@main - if: ${{ failure() }} # only run when this job is failed. - with: - title: Updating quick start module failed - token: ${{secrets.PYTORCHBOT_TOKEN}} - assignees: ${{github.actor}} - labels: bug - body: Updating quick start module failed, please fix update quick start module - - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 - with: - token: ${{ secrets.PYTORCHBOT_TOKEN }} - commit-message: Modify published_versions.json, releases.json and quick-start-module.js - title: '[Getting Started Page] Modify published_versions.json, releases.json and quick-start-module.js' - body: > - This PR is auto-generated. 
It updates Getting Started page - labels: automated pr diff --git a/.github/workflows/validate-quick-start-module.yml b/.github/workflows/validate-quick-start-module.yml deleted file mode 100644 index 2813be181d01..000000000000 --- a/.github/workflows/validate-quick-start-module.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: Validate quick start module -on: - pull_request: - branches: - site - paths: - - published_versions.json - - assets/quick-start-module.js - - .github/workflows/validate-quick-start-module.yml - push: - branches: - site - paths: - - published_versions.json - - assets/quick-start-module.js - - .github/workflows/validate-quick-start-module.yml - workflow_dispatch: - -jobs: - validate-nightly-binaries: - uses: pytorch/test-infra/.github/workflows/validate-binaries.yml@main - with: - os: all - channel: "nightly" - validate-release-binaries: - if: always() - uses: pytorch/test-infra/.github/workflows/validate-binaries.yml@main - needs: validate-nightly-binaries - with: - os: all - channel: "release" diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 90a766742d95..000000000000 --- a/.gitignore +++ /dev/null @@ -1,18 +0,0 @@ -.DS_Store -node_modules -yarn-error.log -/vendor -# These are NOT autogenerated. Check in files as necessary. -!docs/stable/_static/js/vendor/ -!docs/master/_static/js/vendor/ -.bundle -.sass_cache -_site - -.idea/ - -.jekyll-metadata - -.vscode/ - -.netlify/ diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index df1b3b80f323..000000000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "_hub"] - path = _hub - url = https://github.com/pytorch/hub.git diff --git a/.nvmrc b/.nvmrc deleted file mode 100644 index 834eb3fa85bf..000000000000 --- a/.nvmrc +++ /dev/null @@ -1 +0,0 @@ -9.8.0 diff --git a/.ruby-version b/.ruby-version deleted file mode 100644 index 6a81b4c83794..000000000000 --- a/.ruby-version +++ /dev/null @@ -1 +0,0 @@ -2.7.8 diff --git a/404.html b/404.html index 1e39ebf5a0cc..b9383b59f6f8 100644 --- a/404.html +++ b/404.html @@ -1,11 +1,115 @@ ---- -title: Oops! -id: 404 -permalink: /404.html -layout: general ---- - - + + + + + + + + Oops! | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
[HTML markup omitted: document head, site header and navigation, and the "Oops!" heading of the new standalone 404 page]
@@ -115,4 +219,174 @@
[HTML markup omitted: the "Click here to go back to the main page." link, footer cards for Docs ("Access comprehensive developer documentation for PyTorch" / View Docs), Tutorials ("Get in-depth tutorials for beginners and advanced developers" / View Tutorials), and Resources ("Find development resources and get your questions answered" / View Resources), and the site footer with its script tags]
+ + + + + + + + + + + + + + + + + + + + + + + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000000..90e93bd32f19 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to hub +We want to make contributing to this project as easy and transparent as +possible. + +## Pull Requests +We actively welcome your pull requests. + +1. Fork the repo and create your branch from `master`. +2. If you've added code that should be tested, add tests. +3. If you've changed APIs, update the documentation. +4. Ensure the test suite passes. +5. Make sure your code lints. +6. If you haven't already, complete the Contributor License Agreement ("CLA"). + +## Contributor License Agreement ("CLA") +In order to accept your pull request, we need you to submit a CLA. You only need +to do this once to work on any of Facebook's open source projects. + +Complete your CLA here: + +## Issues +We use GitHub issues to track public bugs. Please ensure your description is +clear and has sufficient instructions to be able to reproduce the issue. + +Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe +disclosure of security bugs. In those cases, please go through the process +outlined on that page and do not file a public issue. + +## License +By contributing to hub, you agree that your contributions will be licensed +under the LICENSE file in the root directory of this source tree. \ No newline at end of file diff --git a/Gemfile b/Gemfile deleted file mode 100644 index 166b9e4fa750..000000000000 --- a/Gemfile +++ /dev/null @@ -1,9 +0,0 @@ -source "https://rubygems.org" -ruby "2.7.8" - -group :jekyll_plugins do - gem "github-pages" - gem "jekyll-paginate-v2" - gem 'jekyll-autoprefixer' - gem 'jekyll-feed' -end diff --git a/Gemfile.lock b/Gemfile.lock deleted file mode 100644 index f0011c355d87..000000000000 --- a/Gemfile.lock +++ /dev/null @@ -1,277 +0,0 @@ -GEM - remote: https://rubygems.org/ - specs: - activesupport (6.0.6.1) - concurrent-ruby (~> 1.0, >= 1.0.2) - i18n (>= 0.7, < 2) - minitest (~> 5.1) - tzinfo (~> 1.1) - zeitwerk (~> 2.2, >= 2.2.2) - addressable (2.8.0) - public_suffix (>= 2.0.2, < 5.0) - autoprefixer-rails (9.8.6.5) - execjs - coffee-script (2.4.1) - coffee-script-source - execjs - coffee-script-source (1.11.1) - colorator (1.1.0) - commonmarker (0.17.13) - ruby-enum (~> 0.5) - concurrent-ruby (1.2.0) - dnsruby (1.61.5) - simpleidn (~> 0.1) - em-websocket (0.5.2) - eventmachine (>= 0.12.9) - http_parser.rb (~> 0.6.0) - ethon (0.12.0) - ffi (>= 1.3.0) - eventmachine (1.2.7) - execjs (2.7.0) - faraday (1.3.0) - faraday-net_http (~> 1.0) - multipart-post (>= 1.2, < 3) - ruby2_keywords - faraday-net_http (1.0.1) - ffi (1.15.0) - forwardable-extended (2.6.0) - gemoji (3.0.1) - github-pages (214) - github-pages-health-check (= 1.17.0) - jekyll (= 3.9.0) - jekyll-avatar (= 0.7.0) - jekyll-coffeescript (= 1.1.1) - jekyll-commonmark-ghpages (= 0.1.6) - jekyll-default-layout (= 0.1.4) - jekyll-feed (= 0.15.1) - jekyll-gist (= 1.5.0) - jekyll-github-metadata (= 2.13.0) - jekyll-mentions (= 1.6.0) - jekyll-optional-front-matter (= 0.3.2) - jekyll-paginate (= 1.1.0) - jekyll-readme-index (= 0.3.0) - jekyll-redirect-from (= 0.16.0) - jekyll-relative-links (= 0.6.1) - jekyll-remote-theme (= 0.4.3) - jekyll-sass-converter (= 1.5.2) - jekyll-seo-tag (= 2.7.1) - jekyll-sitemap (= 1.4.0) - jekyll-swiss (= 1.0.0) - jekyll-theme-architect (= 0.1.1) - jekyll-theme-cayman (= 0.1.1) - jekyll-theme-dinky (= 0.1.1) - jekyll-theme-hacker (= 0.1.2) - 
jekyll-theme-leap-day (= 0.1.1) - jekyll-theme-merlot (= 0.1.1) - jekyll-theme-midnight (= 0.1.1) - jekyll-theme-minimal (= 0.1.1) - jekyll-theme-modernist (= 0.1.1) - jekyll-theme-primer (= 0.5.4) - jekyll-theme-slate (= 0.1.1) - jekyll-theme-tactile (= 0.1.1) - jekyll-theme-time-machine (= 0.1.1) - jekyll-titles-from-headings (= 0.5.3) - jemoji (= 0.12.0) - kramdown (= 2.3.1) - kramdown-parser-gfm (= 1.1.0) - liquid (= 4.0.3) - mercenary (~> 0.3) - minima (= 2.5.1) - nokogiri (>= 1.10.4, < 2.0) - rouge (= 3.26.0) - terminal-table (~> 1.4) - github-pages-health-check (1.17.0) - addressable (~> 2.3) - dnsruby (~> 1.60) - octokit (~> 4.0) - public_suffix (>= 2.0.2, < 5.0) - typhoeus (~> 1.3) - html-pipeline (2.14.0) - activesupport (>= 2) - nokogiri (>= 1.4) - http_parser.rb (0.6.0) - i18n (0.9.5) - concurrent-ruby (~> 1.0) - jekyll (3.9.0) - addressable (~> 2.4) - colorator (~> 1.0) - em-websocket (~> 0.5) - i18n (~> 0.7) - jekyll-sass-converter (~> 1.0) - jekyll-watch (~> 2.0) - kramdown (>= 1.17, < 3) - liquid (~> 4.0) - mercenary (~> 0.3.3) - pathutil (~> 0.9) - rouge (>= 1.7, < 4) - safe_yaml (~> 1.0) - jekyll-autoprefixer (1.0.2) - autoprefixer-rails (~> 9.3) - jekyll-avatar (0.7.0) - jekyll (>= 3.0, < 5.0) - jekyll-coffeescript (1.1.1) - coffee-script (~> 2.2) - coffee-script-source (~> 1.11.1) - jekyll-commonmark (1.3.1) - commonmarker (~> 0.14) - jekyll (>= 3.7, < 5.0) - jekyll-commonmark-ghpages (0.1.6) - commonmarker (~> 0.17.6) - jekyll-commonmark (~> 1.2) - rouge (>= 2.0, < 4.0) - jekyll-default-layout (0.1.4) - jekyll (~> 3.0) - jekyll-feed (0.15.1) - jekyll (>= 3.7, < 5.0) - jekyll-gist (1.5.0) - octokit (~> 4.2) - jekyll-github-metadata (2.13.0) - jekyll (>= 3.4, < 5.0) - octokit (~> 4.0, != 4.4.0) - jekyll-mentions (1.6.0) - html-pipeline (~> 2.3) - jekyll (>= 3.7, < 5.0) - jekyll-optional-front-matter (0.3.2) - jekyll (>= 3.0, < 5.0) - jekyll-paginate (1.1.0) - jekyll-paginate-v2 (3.0.0) - jekyll (>= 3.0, < 5.0) - jekyll-readme-index (0.3.0) - jekyll (>= 3.0, < 5.0) - jekyll-redirect-from (0.16.0) - jekyll (>= 3.3, < 5.0) - jekyll-relative-links (0.6.1) - jekyll (>= 3.3, < 5.0) - jekyll-remote-theme (0.4.3) - addressable (~> 2.0) - jekyll (>= 3.5, < 5.0) - jekyll-sass-converter (>= 1.0, <= 3.0.0, != 2.0.0) - rubyzip (>= 1.3.0, < 3.0) - jekyll-sass-converter (1.5.2) - sass (~> 3.4) - jekyll-seo-tag (2.7.1) - jekyll (>= 3.8, < 5.0) - jekyll-sitemap (1.4.0) - jekyll (>= 3.7, < 5.0) - jekyll-swiss (1.0.0) - jekyll-theme-architect (0.1.1) - jekyll (~> 3.5) - jekyll-seo-tag (~> 2.0) - jekyll-theme-cayman (0.1.1) - jekyll (~> 3.5) - jekyll-seo-tag (~> 2.0) - jekyll-theme-dinky (0.1.1) - jekyll (~> 3.5) - jekyll-seo-tag (~> 2.0) - jekyll-theme-hacker (0.1.2) - jekyll (> 3.5, < 5.0) - jekyll-seo-tag (~> 2.0) - jekyll-theme-leap-day (0.1.1) - jekyll (~> 3.5) - jekyll-seo-tag (~> 2.0) - jekyll-theme-merlot (0.1.1) - jekyll (~> 3.5) - jekyll-seo-tag (~> 2.0) - jekyll-theme-midnight (0.1.1) - jekyll (~> 3.5) - jekyll-seo-tag (~> 2.0) - jekyll-theme-minimal (0.1.1) - jekyll (~> 3.5) - jekyll-seo-tag (~> 2.0) - jekyll-theme-modernist (0.1.1) - jekyll (~> 3.5) - jekyll-seo-tag (~> 2.0) - jekyll-theme-primer (0.5.4) - jekyll (> 3.5, < 5.0) - jekyll-github-metadata (~> 2.9) - jekyll-seo-tag (~> 2.0) - jekyll-theme-slate (0.1.1) - jekyll (~> 3.5) - jekyll-seo-tag (~> 2.0) - jekyll-theme-tactile (0.1.1) - jekyll (~> 3.5) - jekyll-seo-tag (~> 2.0) - jekyll-theme-time-machine (0.1.1) - jekyll (~> 3.5) - jekyll-seo-tag (~> 2.0) - jekyll-titles-from-headings (0.5.3) - jekyll (>= 3.3, < 5.0) - 
jekyll-watch (2.2.1) - listen (~> 3.0) - jemoji (0.12.0) - gemoji (~> 3.0) - html-pipeline (~> 2.2) - jekyll (>= 3.0, < 5.0) - kramdown (2.3.1) - rexml - kramdown-parser-gfm (1.1.0) - kramdown (~> 2.0) - liquid (4.0.3) - listen (3.5.1) - rb-fsevent (~> 0.10, >= 0.10.3) - rb-inotify (~> 0.9, >= 0.9.10) - mercenary (0.3.6) - mini_portile2 (2.8.1) - minima (2.5.1) - jekyll (>= 3.5, < 5.0) - jekyll-feed (~> 0.9) - jekyll-seo-tag (~> 2.1) - minitest (5.17.0) - multipart-post (2.1.1) - nokogiri (1.14.3) - mini_portile2 (~> 2.8.0) - racc (~> 1.4) - octokit (4.20.0) - faraday (>= 0.9) - sawyer (~> 0.8.0, >= 0.5.3) - pathutil (0.16.2) - forwardable-extended (~> 2.6) - public_suffix (4.0.6) - racc (1.6.2) - rb-fsevent (0.10.4) - rb-inotify (0.10.1) - ffi (~> 1.0) - rexml (3.2.5) - rouge (3.26.0) - ruby-enum (0.9.0) - i18n - ruby2_keywords (0.0.4) - rubyzip (2.3.0) - safe_yaml (1.0.5) - sass (3.7.4) - sass-listen (~> 4.0.0) - sass-listen (4.0.0) - rb-fsevent (~> 0.9, >= 0.9.4) - rb-inotify (~> 0.9, >= 0.9.7) - sawyer (0.8.2) - addressable (>= 2.3.5) - faraday (> 0.8, < 2.0) - simpleidn (0.2.1) - unf (~> 0.1.4) - terminal-table (1.8.0) - unicode-display_width (~> 1.1, >= 1.1.1) - thread_safe (0.3.6) - typhoeus (1.4.0) - ethon (>= 0.9.0) - tzinfo (1.2.11) - thread_safe (~> 0.1) - unf (0.1.4) - unf_ext - unf_ext (0.0.7.7) - unicode-display_width (1.7.0) - zeitwerk (2.6.7) - -PLATFORMS - ruby - -DEPENDENCIES - github-pages - jekyll-autoprefixer - jekyll-feed - jekyll-paginate-v2 - -RUBY VERSION - ruby 2.7.8p225 - -BUNDLED WITH - 1.17.2 diff --git a/Makefile b/Makefile deleted file mode 100644 index dcbcbf191848..000000000000 --- a/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -SHELL := /bin/bash -BUNDLE := bundle -YARN := yarn -VENDOR_DIR = assets/vendor/ -JEKYLL := $(BUNDLE) exec jekyll - -PROJECT_DEPS := Gemfile package.json - -.PHONY: all clean install update - -all : serve - -check: - $(JEKYLL) doctor - $(HTMLPROOF) --check-html \ - --http-status-ignore 999 \ - --internal-domains localhost:4000 \ - --assume-extension \ - _site - -install: $(PROJECT_DEPS) - $(BUNDLE) install --path vendor/bundler - $(YARN) install - -update: $(PROJECT_DEPS) - $(BUNDLE) update - $(YARN) upgrade - -include-yarn-deps: - mkdir -p $(VENDOR_DIR) - cp node_modules/jquery/dist/jquery.min.js $(VENDOR_DIR) - cp node_modules/popper.js/dist/umd/popper.min.js $(VENDOR_DIR) - cp node_modules/bootstrap/dist/js/bootstrap.min.js $(VENDOR_DIR) - cp node_modules/anchor-js/anchor.min.js $(VENDOR_DIR) - -build: install include-yarn-deps - $(JEKYLL) build --config _config.yml - -serve: install include-yarn-deps - JEKYLL_ENV=development $(JEKYLL) serve --incremental --config _config.yml - -build_deploy: include-yarn-deps - JEKYLL_ENV=production $(JEKYLL) build diff --git a/README.md b/README.md deleted file mode 100644 index f3cad9ccd536..000000000000 --- a/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# pytorch.org site - -[https://pytorch.org](https://pytorch.org) - -A static website built in [Jekyll](https://jekyllrb.com/) and [Bootstrap](https://getbootstrap.com/) for [PyTorch](https://pytorch.org/), and its tutorials and documentation. 
- -## Prerequisites - -Install the following packages before attempting to setup the project: - -- [rbenv](https://github.com/rbenv/rbenv) -- [ruby-build](https://github.com/rbenv/ruby-build) -- [nvm](https://github.com/creationix/nvm) - -On OSX, you can use: - -``` -brew install rbenv ruby-build nvm -``` - -## Setup - -#### Install required Ruby version: - -``` -#### You only need to run these commands if you are missing the needed Ruby version. - -rbenv install `cat .ruby-version` -gem install bundler -v 1.16.3 -rbenv rehash - -#### - -bundle install -rbenv rehash -``` - -#### Install required Node version - -``` -nvm install -nvm use -``` - -#### Install Yarn - -``` -brew install yarn --ignore-dependencies -yarn install -``` - -## Local Development - -To run the website locally for development: - -``` -make serve -``` - -Then navigate to [localhost:4000](localhost:4000). - -Note the `serve` task is contained in a `Makefile` in the root directory. We are using `make` as an alternative to the standard `jekyll serve` as we want to run `yarn`, which is not included in Jekyll by default. - -### Building the Static Site - -To build the static website from source: - -``` -make build -``` - -This will build the static site at `./_site`. This directory is not tracked in git. - -## Deployments - -The website is hosted on [Github Pages](https://pages.github.com/) at [https://pytorch.org](https://pytorch.org). - -To deploy changes, merge your latest code into the `site` branch. A build will be automatically built and committed to the `master` branch via a CircleCI job. - -To view the status of the build visit [https://circleci.com/gh/pytorch/pytorch.github.io](https://circleci.com/gh/pytorch/pytorch.github.io). - -## Contributing to PyTorch Documentation and Tutorials -* You can find information about contributing to PyTorch documentation in the -PyTorch repo [README.md](https://github.com/pytorch/pytorch/blob/master/README.md) file. -* Information about contributing to PyTorch Tutorials can be found in the -tutorials [README.md](https://github.com/pytorch/tutorials/blob/master/README.md). -* Additional contribution information can be found in [PyTorch CONTRIBUTING.md](https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md). 
diff --git a/_board_info/advanced-micro-devices.md b/_board_info/advanced-micro-devices.md deleted file mode 100644 index 202696a7a13a..000000000000 --- a/_board_info/advanced-micro-devices.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: AMD -summary: '' -link: https://amd.com -image: /assets/images/members/amd-logo.svg -class: pytorch-resource -order: 1 -featured-home: true ---- diff --git a/_board_info/arm.md b/_board_info/arm.md deleted file mode 100644 index 588b4984a914..000000000000 --- a/_board_info/arm.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: arm -summary: '' -link: https://www.arm.com/ -image: /assets/images/members/arm-logo.svg -class: pytorch-resource -order: 2 -featured-home: true ---- diff --git a/_board_info/aws.md b/_board_info/aws.md deleted file mode 100644 index 65b823f8cf13..000000000000 --- a/_board_info/aws.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Amazon -summary: '' -link: https://aws.amazon.com -image: /assets/images/members/aws-logo.svg -class: pytorch-resource -order: 2 -featured-home: true ---- diff --git a/_board_info/google-cloud.md b/_board_info/google-cloud.md deleted file mode 100644 index 10fc9de6d738..000000000000 --- a/_board_info/google-cloud.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Google Cloud -summary: '' -link: https://cloud.google.com/gcp -image: /assets/images/members/google-cloud-logo.svg -class: pytorch-resource -order: 3 -featured-home: true ---- diff --git a/_board_info/huawei.md b/_board_info/huawei.md deleted file mode 100644 index 583d74b0957a..000000000000 --- a/_board_info/huawei.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Huawei -summary: '' -link: https://www.huawei.com/ -image: /assets/images/members/huawei-logo.svg -class: pytorch-resource -order: 5 -featured-home: true ---- diff --git a/_board_info/hugging-face.md b/_board_info/hugging-face.md deleted file mode 100644 index 298c08670cf7..000000000000 --- a/_board_info/hugging-face.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Hugging Face -summary: '' -link: https://huggingface.co/ -image: /assets/images/members/hf-logo.svg -class: pytorch-resource -order: 5 -featured-home: true ---- diff --git a/_board_info/ibm.md b/_board_info/ibm.md deleted file mode 100644 index fa3875006c57..000000000000 --- a/_board_info/ibm.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: IBM -summary: '' -link: https://www.ibm.com/ -image: /assets/images/members/ibm-logo.svg -class: pytorch-resource -order: 6 -featured-home: true ---- diff --git a/_board_info/intel.md b/_board_info/intel.md deleted file mode 100644 index c4f29dbdf4f7..000000000000 --- a/_board_info/intel.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Intel -summary: '' -link: https://www.intel.com/ -image: /assets/images/intel-new-logo.svg -class: pytorch-resource -order: 7 -featured-home: true ---- diff --git a/_board_info/lightning.md b/_board_info/lightning.md deleted file mode 100644 index 037d8f797de9..000000000000 --- a/_board_info/lightning.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Lightning AI -summary: '' -link: https://lightning.ai/ -image: /assets/images/members/lightning-logo.png -class: pytorch-resource -order: 8 -featured-home: true ---- diff --git a/_board_info/meta.md b/_board_info/meta.md deleted file mode 100644 index aac6580d8413..000000000000 --- a/_board_info/meta.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Meta -summary: '' -link: https://meta.com -image: /assets/images/members/meta-logo.svg -class: pytorch-resource -order: 9 -featured-home: true ---- diff --git a/_board_info/microsoft-corporation.md 
b/_board_info/microsoft-corporation.md deleted file mode 100644 index 7536306b31e2..000000000000 --- a/_board_info/microsoft-corporation.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Microsoft -summary: '' -link: https://azure.microsoft.com -image: /assets/images/members/microsoft-azure-logo.svg -class: pytorch-resource -order: 10 -featured-home: true ---- diff --git a/_board_info/nvidia-corporation.md b/_board_info/nvidia-corporation.md deleted file mode 100644 index fbb018bb9acf..000000000000 --- a/_board_info/nvidia-corporation.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Nvidia -summary: '' -link: https://www.nvidia.com/en-us/ai-data-science/ -image: /assets/images/members/nvidia-logo.svg -class: pytorch-resource -order: 11 -featured-home: true ---- diff --git a/_case_studies/amazon-ads.md b/_case_studies/amazon-ads.md deleted file mode 100644 index 7515e5205a55..000000000000 --- a/_case_studies/amazon-ads.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: blog_detail -title: Amazon Ads -logo: assets/images/amazon-ads-logo.png -featured-home: true -order: 1 -link: /blog/amazon-ads-case-study/ ---- - -Reduce inference costs by 71% and drive scale out using PyTorch, TorchServe, and AWS Inferentia. diff --git a/_case_studies/salesforce.md b/_case_studies/salesforce.md deleted file mode 100644 index 9e0f7713b3b6..000000000000 --- a/_case_studies/salesforce.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: blog_detail -title: Salesforce -logo: assets/images/salesforce.png -featured-home: true -order: 2 -link: ---- - -Pushing the state of the art in NLP and Multi-task learning. diff --git a/_case_studies/stanford-university.md b/_case_studies/stanford-university.md deleted file mode 100644 index 7629ee10a74b..000000000000 --- a/_case_studies/stanford-university.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: blog_detail -title: Stanford University -logo: assets/images/stanford-university.png -featured-home: true -order: 3 -link: ---- - -Using PyTorch's flexibility to efficiently research new algorithmic approaches. diff --git a/_config.yml b/_config.yml index 8048f57000ff..4b319e0d4e19 100644 --- a/_config.yml +++ b/_config.yml @@ -1,131 +1 @@ -# Site settings -title: "PyTorch Website" -author: "Facebook" -default_author: Team PyTorch -description: "Scientific Computing..." 
-latest_version: 1.0 -timezone: America/Los_Angeles -url: "https://pytorch.org" -baseurl: "" -plugins: - - jekyll-paginate-v2 - - jekyll-redirect-from - - jekyll-autoprefixer - - jekyll-feed -sass: - load_paths: - - _sass - - node_modules -exclude: - [ - vendor, - node_modules, - README.md, - Gemfile, - Gemdile.lock, - yarn.lock, - yarn-error.log, - package.json, - Makefile, - scripts, - _hub/docs/template.md, - ] -include: - [ - _static, - _images, - _modules, - _sources, - _asserts.html, - _comparison.html, - _creation.html, - _dynamo.html, - _inductor.html, - _lowrank.html, - _script.html, - _serialization.html, - _symbolic_trace.html, - _tensor_str.html, - _trace.html, - _utils.html, - ] -keep_files: [vendor/assets, docs/master/_static/js/vendor/] -github: [metadata] -external_urls: - github: https://github.com/pytorch/pytorch - github_issues: https://github.com/pytorch/pytorch/issues - hub_issues: https://github.com/pytorch/hub/issues - contributing: https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md - hub_template: https://github.com/pytorch/hub/blob/master/docs/template.md - twitter: https://twitter.com/pytorch - facebook: https://www.facebook.com/pytorch - slack: https://join.slack.com/t/pytorch/shared_invite/zt-2j2la612p-miUinTTaxXczKOJw48poHA - wechat: https://pytorch.org/wechat - discuss: https://discuss.pytorch.org - contributor_forum: https://dev-discuss.pytorch.org/ - tutorials: https://pytorch.org/tutorials - previous_pytorch_versions: https://pytorch.org/previous-versions/ - udacity_courses: https://pytorch.org - security: https://github.com/pytorch/pytorch/security/policy - youtube: https://www.youtube.com/pytorch - spotify: https://open.spotify.com/show/6UzHKeiy368jKfQMKKvJY5 - apple: https://podcasts.apple.com/us/podcast/pytorch-developer-podcast/id1566080008 - google: https://www.google.com/podcasts?feed=aHR0cHM6Ly9mZWVkcy5zaW1wbGVjYXN0LmNvbS9PQjVGa0lsOA%3D%3D - amazon: https://music.amazon.com/podcasts/7a4e6f0e-26c2-49e9-a478-41bd244197d0/PyTorch-Developer-Podcast? 
- linkedIn: https://www.linkedin.com/company/pytorch -livereload: true -markdown: kramdown -highlighter: rouge -collections: - get_started: - output: true - ecosystem: - output: true - permalink: /ecosystem/:path/ - hub: - output: true - permalink: /hub/:title/ - community_stories: - output: true - permalink: /community-stories/:path/ - style_guide: - output: false - posts: - output: true - permalink: /blog/:title/ - resources: - output: false - features: - output: false - courses: - output: false - mobile: - output: true - news: - output: true - past_issues: - output: true - events: - output: true - future: true - case_studies: - output: true - board_info: - output: true - community_blog: - output: true - videos: - output: true - -pagination: - enabled: true - per_page: 8 - permalink: "/:num/" - title: ":title | :num of :max" - limit: 0 - sort_field: "date" - sort_reverse: true - trail: - before: 2 - after: 2 -# google_site_verification: eOAFtDphTbbm4OPKva2d3Z0Z_2bBxWMGdkD0IRQ6VeA +include: [_static, _images, _modules, _sources, _asserts.html, _creation.html, _comparison.html, _lowrank.html, _script.html, _diagnostic.html, _dynamo.html, _serialization.html, _type_utils, _tensor_str.html, _trace.html, _utils.html, _internal, _C, _distributed_autograd.html, _distributed_c10d.html, _distributed_rpc.html, _fft.html, _linalg.html, _monitor.html, _nested.html, _nn.html, _profiler.html, _sparse.html, _special.html, __config__.html, _dynamo, _lobpcg.html, _jit_internal.html, _numeric_suite.html, _numeric_suite_fx.html, _sanitizer.html, _symbolic_trace.html, _async.html, _freeze.html, _fuser.html, _type_utils.html, _utils ] diff --git a/_courses/course-1.md b/_courses/course-1.md deleted file mode 100644 index d637355ed0fe..000000000000 --- a/_courses/course-1.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Course 1 -summary: Lorem ipsum dolor sit amet, consectetur adipiscing elit -thumbnail: http://via.placeholder.com/560x360/ffffff/d8d8d8 -link: https://pytorch.org -order: 1 ---- - diff --git a/_courses/course-2.md b/_courses/course-2.md deleted file mode 100644 index bcbf94fbddd6..000000000000 --- a/_courses/course-2.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Course 2 -summary: Lorem ipsum dolor sit amet, consectetur adipiscing elit -thumbnail: http://via.placeholder.com/560x360/ffffff/d8d8d8 -link: https://pytorch.org -order: 2 ---- diff --git a/_courses/course-3.md b/_courses/course-3.md deleted file mode 100644 index 35bddf9e3cd0..000000000000 --- a/_courses/course-3.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Course 3 -summary: Lorem ipsum dolor sit amet, consectetur adipiscing elit -thumbnail: http://via.placeholder.com/560x360/ffffff/d8d8d8 -link: https://pytorch.org -order: 3 ---- diff --git a/_courses/course-4.md b/_courses/course-4.md deleted file mode 100644 index 7bab968a5b34..000000000000 --- a/_courses/course-4.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Course 4 -summary: Lorem ipsum dolor sit amet, consectetur adipiscing elit -thumbnail: http://via.placeholder.com/560x360/ffffff/d8d8d8 -link: https://pytorch.org -order: 4 ---- diff --git a/_data/ecosystem/ptc/2022/posters.yaml b/_data/ecosystem/ptc/2022/posters.yaml deleted file mode 100644 index 8c6e3fdf1cb9..000000000000 --- a/_data/ecosystem/ptc/2022/posters.yaml +++ /dev/null @@ -1,461 +0,0 @@ -- authors: - - Dinkar Juyal - - Syed Asher Javed - - Harshith Padigela - - Limin Yu - - Aaditya Prakash - - Logan Kilpatrick - - Anand Sampat - - PathAI - categories: - - COMPUTER VISION - description: "PathAI is a Boston based 
company focussed on improving patient care using AI powered pathology. We heavily use PyTorch for building our ML systems, specifically training and deploying models on large gigapixel pathology images. In this case study, we highlight our use of PyTorch to build, experiment and deploy Additive Multiple Instance Learning (MIL) models. Additive MIL is a novel MIL technique built using PyTorch Lightning which allows end-to-end learning from millions of pixels while providing granular interpretability of spatial heatmaps. These models allow for the exact computation of the extent to which each smaller region in the gigapixel-sized image contributes to the final model prediction. This enables class-wise excitatory and inhibitory contributions to be visualized on top of the pathology image. This informs the practitioners of model failures and guides the pathologists to areas of interest. All this is made possible due to PyTorch's rapid research-to-prototype-to-deployment iteration cycle." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/A01.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/A01-thumb.png - title: "Enabling State-of-the-art Interpretability for Medical Imaging Using PyTorch" - -- authors: - - Erik Hagendorn - categories: - - LIBRARIES - description: "TorchUnmix is a library which aims to provide automatic stain unmixing and augmentation for histopathology whole slide images. Separation of histochemical stains (unmixing) is performed by orthonormal transformation of the RGB pixel data from predefined light absorption coefficients called stain vectors [1]. Precomputed publicly available stain vector definitions are often used, but inter-laboratory variation due to the histology and/or image acquisition process is common, yielding suboptimal unmixing results. Classical stain vector estimation methods rely on abundant distribution of stains, making them less practical for sparser distributions as observed from immunohistochemical stains. Geis et al. proposed a method based on k-means clustering of pixel values in the hue-saturation-density color space to determine optimal stain vectors which has been used in this work [2]. While stain vectors may be used for quantification of individual stains, TorchUnmix also provides functionalities to perform stain augmentation. Stain augmentation is a method used during the training process of deep learning models to improve generalization by unmixing the image, stochastically modifying the individual stains, and then compositing the stains into the final augmented image [3]. To our knowledge, no other libraries fully implement the above methods in PyTorch, utilizing GPU-acceleration. Additionally, TorchUnmix has extended all calculations used to perform the automatic stain unmixing and augmentation to operate on batches of images, drastically accelerating execution performance speeds in comparison to other libraries." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B01.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B01-thumb.png - title: "TorchUnmix: Automatic Stain Unmixing and Augmentation for Histopathology Images in PyTorch" - -- authors: - - Kai Fricke - - Balaji Veeramani - categories: - - LIBRARIES - description: "Scaling machine learning is hard: Cloud platform solutions like SageMaker can limit flexibility, but a custom distributed framework is often too hard to implement. 
In effect, ML engineers struggle to scale their workloads from local prototyping to the cloud. \n The Ray AI Runtime ('Ray AIR') is an integrated collection of machine learning libraries built around distributed computing framework Ray. It provides an easy to use interface for scalable data processing, training, tuning, batch prediction, and online serving. Adapting existing PyTorch training loops to Ray AIR's PyTorch integration needs as little as 10 lines of code changes. And scaling from local development to the cloud needs no code changes at all." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B02.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B02-thumb.png - title: "Scalable Training and Inference With Ray AIR" - -- authors: - - Jan HĂŒckelheim - categories: - - LIBRARIES - description: "Mixed Mode autodiff combines back-propagation and forward differentiation. Both modes have pros and cons: Back-propagation is efficient for scalar functions with many trainable parameters. Back-propagation uses memory for intermediate results, requires data flow reversal, scales poorly for many output variables. Forward differentiation is straightforward to implement, memory-efficient, and easy to vectorize/parallelize or port to new hardware. Forward mode scales poorly with large number of trainable parameters. AutoMAD makes it possible to combine both modes. Use forward differentiation for some layers, while using back-prop for others." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B03.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B03-thumb.png - title: "AutoMAD: Mixed Mode Autodiff for PyTorch Models" - -- authors: - - Daniel Haziza - - Francisco Massa - - Jeremy Reizenstein - - Patrick Labatut - - Diana Liskovich - categories: - - LIBRARIES - description: "We present xFormers, a toolbox to accelerate research on Transformers. It contains efficient components, like an exact memory-efficient multi-head attention that can accelerate trainings 2x while using a fraction of the memory. xFormers components are also customizable and can be combined together to build variations of Transformers. Our hope is to enable the next generation of research based on Transformers." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B04.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B04-thumb.png - title: "xFormers: Building Blocks for Efficient Transformers" - -- authors: - - Max Balandat - categories: - - LIBRARIES - description: "linear_operator (https://github.com/cornellius-gp/linear_operator) is a library for structured linear algebra built on PyTorch. It provides a LinearOperator class that represents a tensor that is never instantiated but is instead accessed through operations like matrix multiplication, solves, decompositions, and indexing. These objects use custom linear algebra operations that can exploit particular matrix structure (e.g. diagonal, block-diagonal, triangular, Kronecker, etc.) in computations in order to achieve substantial (many orders of magnitude) improvements in time and memory complexity. Moreover, many efficient linear algebra operations (e.g. solves, decompositions, indexing, etc.) can be automatically generated from the LinearOperator's matmul function. This makes it extremely easy to compose or implement custom LinearOperators. 
\n The key aspect that makes linear_operator easy to use in PyTorch code is its integration with the `__torch_function__` interface - Common linear algebra operations (such as matrix multiplication, solve, SVD) are mapped to the respective torch functions (`__matmul__`, `torch.linalg.solve`, `torch.linalg.svd`), so that LinearOperator objects can be used as drop-in replacements for dense tensors even in existing code. LinearOperator operations themselves may return LinearOperator objects, automatically keeping track of algebraic structure after each computation. As a result, users never need to reason about what efficient linear algebra routines to use (so long as the input elements defined by the user encode known input structure)." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B05.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B05-thumb.png - title: "linear_operator - Structured Linear Algebra in PyTorch" - -- authors: - - Justin Zhao - categories: - - LIBRARIES - description: "Ludwig is a declarative machine learning framework that makes it easy to define and compare machine learning pipelines using a simple and flexible data-driven configuration system. The minimal configuration declares the input and output features with their respective data types. Users can specify additional parameters to preprocess, encode, and decode features, load from pre-trained models, compose the internal model architecture, set training parameters, or run hyperparameter optimization. Ludwig will build an end-to-end machine learning pipeline automatically, using whatever is explicitly specified in the configuration, while falling back to smart defaults for any parameters that are not. Scientists, engineers, and researchers use Ludwig to explore state-of-the-art model architectures, run hyperparameter search, and scale up to larger than available memory datasets and multi-node clusters, on a variety of problems using structured and unstructured features. Ludwig has 8.5K+ stars on Github and is built on top of PyTorch, Horovod, and Ray." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B06.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B06-thumb.png - title: "Declarative Machine Learning with Ludwig: End-to-end Machine Learning Pipelines Using Simple and Flexible Data-driven Configurations" - -- authors: - - Christian Puhrsch - categories: - - LIBRARIES - description: "This poster presents an overview of available and ongoing developments related to sparse memory formats, masked computation, and support for collections of variably shaped data. In particular it contains a case study of block sparse memory formats, MaskedTensor, and NestedTensor." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B07.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B07-thumb.png - title: "Generalized Shapes: Block Sparsity, MaskedTensor, NestedTensor" - -- authors: - - Sang Keun Choe - - categories: - - LIBRARIES - description: "Betty is a simple, scalable and modular library for generalized meta-learning (GML) and multilevel optimization (MLO), built upon PyTorch, that allows a unified programming interface for a number of GML/MLO applications including few-shot learning, hyperparameter optimization, neural architecture search, data reweighting, and many more. 
The internal autodiff mechanism and the software design of Betty are developed by the novel interpretation of GML/MLO as a dataflow graph." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B08.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B08-thumb.png - title: "Betty: An Automatic Differentiation Library for Generalized Meta Learning" - -- authors: - - Samantha Andow - - Richard Zhou - - Horace He - - Animesh Jain - categories: - - LIBRARIES - description: "Inspired by Google JAX, functorch is a library in Pytorch that offers composable vmap (vectorization) and autodiff transforms (grad, vjp, jvp). Since its first release alongside Pytorch 1.11, combining these transforms has helped users develop and explore new techniques that were previously tricky to write in Pytorch, like Neural Tangent Kernels and non-linear optimizations (see Theseus, also from PyTorch). This will go through some basic usages and highlight some research that leverages functorch." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B09.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B09-thumb.png - title: "Functorch: Composable Function Transforms in Pytorch" - -- authors: - - Patrick Stiller - - Jeyhun Rustamov - - Friedrich Bethke - - Maksim Zhdanov - - Raj Sutarya - - Mahnoor Tanveer - - Karan Shah - - Richard Pausch - - Sunna Torge - - Alexander Debus - - Attila Cangi - - Peter Steinbach - - Michael Bussmann - - Nico Hoffmann - categories: - - LIBRARIES - description: "Our open-source Neural Solvers framework provides data-free ML-based solvers for the study and analysis of phenomena in natural sciences built on top of Pytorch. We were the first to show that certain quantum systems modeled by the 2d Schrödinger equation can be accurately solved while retaining strong scaling. We also developed a novel neural network architecture, GatedPINN [1], introducing adaptable domain decomposition into the training of Physics-informed Neural Networks based on the Mixture-of-Experts paradigm. Distributed large-scale training of our GatedPINN is facilitated by Horovod, resulting in excellent GPU utilization making Neural Solvers ready for the upcoming exascale era. Upcoming projects involve higher dimensional problems such as 3d laser systems and coupled models to study the Vlasov-Maxwell system. Further experiments on novel very scalable compute hardware paves the way for applications of high-fidelity Neural Solvers to real-world applications such as Inverse Scattering Problems." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B10.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B10-thumb.png - title: "Large-Scale Neural Solvers for Partial Differential Equations" - -- authors: - - Haoqi Fan - categories: - - LIBRARIES - description: "PyTorchVideo is the deep learning library for video understanding research in PyTorch. \n" - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B11.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B11-thumb.png - title: "PyTorch Video: A Deep Learning Library for Video Understanding" - -- authors: - - Zhihan Fang - categories: - - LIBRARIES - description: "Federated Learning with Differential Privacy has witnessed an increased adoption as one of the most promising ways to train machine learning models while preserving user privacy. 
Existing models in Meta around people attributes are mostly built on traditional centralized machine learning methods. Recently, due to the increasing concerns about user privacy internally and externally, Machine Learning teams at Meta are experiencing either signal loss or restriction on applying new features in models to further improve model performance. In this paper, we are introducing a generic framework we built for preparing and generating models for federated learning. The model preparation process is to utilize traditional machine learning to understand model structure and hyperparameters for the target problems including training, inference, evaluations. It also requires a simulation process to train the target model structure and understand the simulated environment on the server side to tune FL specific hyperparameters. \n The model generation process is to generate device compatible models, which can be used directly on users’ devices for federated learning. We applied the FL framework on our on-device models, and integrated with device signals to improve user experience and protect user privacy." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B12.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B12-thumb.png - title: "Model Preparation Federated Learning and Device Computation" - -- authors: - - Jose Gallego-Posada - - Juan Camilo Ramirez - categories: - - LIBRARIES - description: "Cooper (https://github.com/cooper-org/cooper) is a general-purpose, deep learning-first constrained optimization library in PyTorch. Cooper is (almost!) seamlessly integrated with PyTorch and preserves the usual loss backward step workflow. If you are already familiar with PyTorch, using Cooper will be a breeze! \n This library aims to encourage and facilitate the study of constrained optimization problems in deep learning. Cooper focuses on non-convex constrained optimization problems for which the loss or constraints are not necessarily “nicely behaved” or “theoretically tractable”. Moreover, Cooper has been designed to play nicely with mini-batched/stochastic estimates for the objective and constraint functions. \n Cooper implements several popular constrained optimization protocols so you can focus on your project, while we handle the nitty-gritty behind the scenes." - link: https://github.com/cooper-org/cooper - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B13.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B13-thumb.png - title: "Constrained Optimization in PyTorch With Cooper" - -- authors: - - Wanchao Liang - - Junjie Wang - categories: - - LIBRARIES - description: "This talk will introduce 2-dimensional parallelism with PyTorch (Data Parallelism + Tensor Parallelism) using Distributed Tensor, a fundamental distributed primitive offered by PyTorch Distributed that empowers Tensor Parallelism. We have proven that using FSDP + Tensor Parallelism together could enable us to train large models like Transformer, and increase training performance. We offer end to end training techniques that enable you to train models in 2-D parallelism fashion, and checkpoint save/load models in a distributed manner." 
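The tensor-parallel half of the 2-D scheme described in the entry above comes down to sharding a layer's weight across ranks and stitching the partial results back together. The sketch below illustrates that column-parallel split on a single process with plain tensor ops; it does not use the Distributed Tensor API itself, and the shapes are arbitrary placeholders.

```python
import torch

# Column-parallel Linear, simulated on one process: the weight of a Linear(16, 32)
# is split along its output dimension, each "rank" computes its own slice, and a
# concatenation along the feature dimension stands in for the all-gather.
torch.manual_seed(0)
x = torch.randn(4, 16)          # (batch, in_features)
w = torch.randn(32, 16)         # full weight of a Linear(16, 32), bias omitted

w0, w1 = w.chunk(2, dim=0)      # shard held by "rank 0" and shard held by "rank 1"
y0 = x @ w0.t()                 # computed on rank 0
y1 = x @ w1.t()                 # computed on rank 1
y = torch.cat([y0, y1], dim=1)  # reassemble the full activation

assert torch.allclose(y, x @ w.t(), atol=1e-6)
```

In the 2-D setup described above, FSDP then shards each rank's slice (and its gradients and optimizer state) again along the data-parallel dimension.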
- link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B14.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B14-thumb.png - title: "Two Dimensional Parallelism Using Distributed Tensors" - -- authors: - - Manu Joseph - categories: - - LIBRARIES - description: "In spite of showing unreasonable effectiveness in modalities like text and image, Deep Learning has always lagged Gradient Boosting in tabular data- both in popularity and performance. But recently there have been newer models created specifically for tabular data, which is pushing the performance bar. Popularity is still a challenge, however, because there is no easy, ready-to-use library like Sci-Kit Learn for deep learning. PyTorch Tabular aims to change that by being an easy-to-use and flexible framework which makes using SOTA model architectures in tabular data as easy as Sci-Kit Learn." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B15.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B15-thumb.png - title: "PyTorch Tabular: A Framework for Deep Learning with Tabular Data" - -- authors: - - Michael Gschwind - - Christian Puhrsch - - Driss Guessous - - Rui Zhu - - Daniel Haziza - - Francisco Massa - categories: - - LIBRARIES - description: "We introduce Better Transformer, the PyTorch project to accelerate Transformers for inference and training with out-of-the-box enablement by implementing the Better Transformer ‘fastpath’. Fastpath accelerates many of the most commonly executed functions in Transformer models. Starting with PyTorch 1.13, the PyTorch Core API is implemented with accelerated operations to deliver up to 2x-4x speedups on many Transformer models, such as BERT and XLM-R. Accelerated operations are based on (1) operator and kernel fusion and (2) exploiting sparsity created by variable sequence-length NLP batches. In addition to improving MultiHeadAttention with fastpath, the model also includes sparsity support for MultiHeadAttention and TransformerEncoder modules to take advantage of variable sequence-length information with Nested Tensors for NLP models. \n At present, we enable torchtext and Hugging Face domain libraries with Better Transformer, delivering significant speedups for text, image, and audio models. Starting with the next release, PyTorch core will include even faster fused kernels and training support. You can preview these features today with PyTorch Nightlies, the nightly preview builds of the upcoming PyTorch release." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B17.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B17-thumb.png - title: "Better Transformer: Accelerating Transformer Inference in PyTorch" - -- authors: - - Ke Wen - - Pavel Belevich - - Anjali Sridhar - categories: - - LIBRARIES - description: "PiPPy is a library that provides automated pipeline parallelism for PyTorch models. With compiler techniques, PiPPy splits a model into pipeline stages without requiring model changes. PiPPy also provides a distributed runtime that distributes the split stages to multiple devices and hosts and orchestrates micro-batch execution in an overlapped fashion. We demonstrate application of PiPPy to Hugging Face models achieving 3x speedup on cloud platforms." 
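PiPPy automates the splitting, but the effect of pipeline parallelism described above is easy to show by hand: the model is cut into stages and micro-batches flow through them. The sketch below performs the split and micro-batching sequentially on one process with stock PyTorch; it does not call the PiPPy API, and the layer sizes are made up for illustration.

```python
import torch
import torch.nn as nn

# A toy model cut into two pipeline stages. PiPPy would place each stage on a
# different device/host and overlap micro-batch execution; here everything runs
# sequentially on one process just to show the idea.
model = nn.Sequential(
    nn.Linear(16, 32), nn.ReLU(),   # stage 0
    nn.Linear(32, 10),              # stage 1
)
stage0, stage1 = model[:2], model[2:]

batch = torch.randn(8, 16)
outputs = [stage1(stage0(mb)) for mb in batch.chunk(4)]  # 4 micro-batches of 2
result = torch.cat(outputs)

assert torch.allclose(result, model(batch), atol=1e-6)
```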
- link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/B18.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/B18-thumb.png - title: "PiPPy: Automated Pipeline Parallelism for PyTorch" - -- authors: - - Keita Watanabe - categories: - - OPTIMIZATION - description: "In this session we will go through step-by-step how to conduct the inference process of machine learning models using Inferentia. In addition, we compare the inference performance with GPU and discuss the cost advantage. In the later part of the session, we will also cover model deployment on Kubernetes." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/C01.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/C01-thumb.png - title: "Practical Guide on PyTorch Inference Using AWS Inferentia" - -- authors: - - Mingfei Ma - - - categories: - - OPTIMIZATION - description: "Accelerating PyG CPU performance with faster sparse aggregation.\nPyG is a library built upon PyTorch to easily write and train Graph Neural Networks, which heavily relies on the mechanism of Message Passing for information aggregation. We have optimized critical bottlenecks of Message Passing from PyTorch, including: 1. Scatter Reduce: maps to classic PyG use case when the EdgeIndex is stored in COO memory format. 2. SpMM Reduce: maps to the usage case when the EdgeIndex is stored in CSR memory format." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/C02.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/C02-thumb.png - title: "PyG Performance Optimization for CPU" - -- authors: - - Jerry Zhang - categories: - - OPTIMIZATION - description: "Currently, PyTorch Architecture Optimization (torch.ao) offers two quantization flow tools: eager mode quantization (beta) and fx graph mode quantization (prototype). With PyTorch 2.0 coming up, we are going to redesign quantization on top of the PyTorch 2.0 export path, this talk will introduce our plans for supporting quantization in PyTorch 2.0 export path, its main advantages over the previous tools, and how modeling developers and backend developers will be interacting with this flow." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/C03.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/C03-thumb.png - title: "Quantization in PyTorch 2.0 Export" - -- authors: - - Naren Dasan - - Dheeraj Peri - - Bo Wang - - Apurba Bose - - George Stefanakis - - Nick Comly - - Wei Wei - - Shirong Wu - - Yinghai Lu - categories: - - OPTIMIZATION - description: "Torch-TensorRT is an open-source compiler targeting NVIDIA GPUs for high-performance deep-learning inference in PyTorch. It combines the usability of PyTorch with the performance of TensorRT allowing for easy optimization of inference workloads on NVIDIA GPUs. Torch-TensorRT supports all classes of optimizations in TensorRT including reduced mixed precision down to INT8, through simple Python & C++ APIs designed to work directly from PyTorch. Torch-TensorRT outputs standard PyTorch modules as well as the TorchScript format to allow for a completely self-contained, portable, & static module with TensorRT engines embedded. 
We present recent improvements to Torch-TensorRT including the new FX frontend which allows developers to use a full Python workflow for optimizing models and extend Torch-TensorRT in Python, the unified Torch-TensorRT Runtime which enables hybrid FX + TorchScript workflows and discuss future work for the project." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/C04.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/C04-thumb.png - title: "Torch-TensorRT: A Compiler for Accelerating PyTorch Inference Using TensorRT" - -- authors: - - Sanchit Jain - categories: - - OPTIMIZATION - description: "The open-source oneDNN Graph library extends oneDNN with a flexible graph API to maximize the optimization opportunities for generating efficient code on AI hardware (currently x86-64 CPUs, but GPU support is on the way). It automatically identifies the graph partitions to be accelerated via fusion. Its fusion patterns entail fusing compute-intensive operations such as convolution, matmul and their neighbor operations for both inference and training use cases. Since PyTorch 1.12, oneDNN Graph has been supported as an experimental feature to speed up inference with Float32 datatype on x86-64 CPUs. Support for inference with oneDNN Graph using BFloat16 datatype exists in the PyTorch master branch, and hence also in nightly PyTorch releases. Intel Extension for PyTorch is an open-source library that builds on top of PyTorch, and can be thought of as a 'staging-ground' for optimizations in PyTorch from Intel. It leverages oneDNN Graph for inference with int8 datatype. This poster presents reproducible results with PyTorch’s TorchBench benchmarking suite to demonstrate the inference speedup achieved with PyTorch & oneDNN Graph using Float32, BFloat16 & int8 datatypes." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/G01.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/G01-thumb.png - title: "Accelerating Inference with PyTorch by Leveraging Graph Fusions With oneDNN Graph" - -- authors: - - Alban Desmaison - categories: - - OTHER - description: "This poster presents the new extension points that the PyTorch team has designed to allow users to extend PyTorch from Python. We will cover an introduction to Tensor Subclassing, Modes and torch library. We will briefly describe each extension point and talk through examples such as memory profiling, logging used operators, quantization and custom sparse kernel all in less than 100 LOC. We will also introduce the new ways you can add new devices and author kernels without the need to modify PyTorch directly." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/D01.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/D01-thumb.png - title: "Back to Python: Extending PyTorch Without Touching C++" - -- authors: - - Brian Hirsh - categories: - - OTHER - description: "Functionalization is a way to remove mutations from arbitrary PyTorch programs sent to downstream compilers. The PyTorch 2.0 stack is all about capturing graphs of PyTorch operations and sending them off to a compiler to get better performance. PyTorch programs can mutate and alias state, making them unfriendly to compilers. Functionalization is a technique to take a program full of PyTorch operators, including mutable and aliasing operators, and remove all mutations from the program while preserving semantics." 
- link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/D02.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/D02-thumb.png - title: "Functionalization in PyTorch" - -- authors: - - Pankaj Takawale - - Dagshayani Kamalaharan - - Zbigniew Gasiorek - - Rahul Sharnagat - categories: - - OTHER - description: "Walmart Search has embarked on the journey of adopting Deep Learning in the Search ecosystem for improving Search relevance in various areas. As our pilot use case, we wanted to serve the computationally intensive BERT Base model at runtime, with the objective of achieving low latency and high throughput. We had JVM-hosted web applications loading and serving multiple models. The experimental models were being loaded onto the same applications. These models are large, and computation is expensive. \n We were facing the following limitations with this approach: refreshing a model with the latest version or adding a new experimental model required an application deployment; increased memory pressure on a single application; slow startup time due to loading multiple ML models during startup; and concurrency was not beneficial due to limited CPU (metrics on concurrent model prediction vs. sequential)." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/D03.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/D03-thumb.png - title: "Walmart Search: Serving Models at a Scale on TorchServe" - -- authors: - - Joe Doliner - - Jimmy Whitaker - categories: - - PRODUCTION - description: "TorchX is incredibly useful for developing PyTorch applications quickly. But when it comes to deployment, nothing is easy. With Docker development, Kubernetes, and custom schedulers, there’s a lot to learn. In this talk, we’ll discuss how organizations can deploy to production, why TorchX is a great system for this, and the lessons we learned so you can avoid the same pitfalls." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/E01.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/E01-thumb.png - title: "TorchX: From Local Development to Kubernetes and Back" - -- authors: - - Shauheen Zahirazami - - Jack Cao - - Blake Hechtman - - Alex Wertheim - - Ronghang Hu - categories: - - PRODUCTION - description: "PyTorch/XLA enables PyTorch users to run their models on XLA devices, including Google's Cloud TPUs. The latest improvements in PyTorch/XLA enable training very large PyTorch models using FSDP. In this work, we present benchmarks and hardware FLOPs utilization for training Hugging Face GPT-2 on Cloud TPU v4." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/E02.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/E02-thumb.png - title: "Training at Scale Using Fully Sharded Data Parallel (FSDP) with PyTorch/XLA" - -- authors: - - Rohan Varma - - Andrew Gu - categories: - - PRODUCTION - description: "This talk dives into recent advances in PyTorch Fully Sharded Data Parallel (FSDP) that have enabled better throughput, memory savings, and extensibility. These improvements have unblocked using FSDP for models of different modalities and for varying model and data sizes. We will share best practices to apply these features to specific use cases such as XLMR, FLAVA, ViT, DHEN, and GPT3-style models."
- link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/E03.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/E03-thumb.png - title: "FSDP Production Readiness" - -- authors: - - Erwin Huizenga - - Nikita Namjoshi - categories: - - PRODUCTION - description: "TorchX is a universal job launcher for PyTorch applications that helps ML practitioners speed up iteration time and support end-to-end production. In this talk, we show you how to build and run TorchX components as a pipeline using the Kubeflow Pipelines (KFP) DSL. We go into detail on how to use KFP and TorchX to build components and how to use the KFP DSL to orchestrate and run ML workflows." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/E04.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/E04-thumb.png - title: "Orchestrating PyTorch Workflows With Kubeflow Pipelines and TorchX" - -- authors: - - Shauheen Zahirazami - - James Rubin - - Mehdi Amini - - Thea Lamkin - - Eugene Burmako - - Navid Khajouei - categories: - - PRODUCTION - description: "ML development is often stymied by incompatibilities between frameworks and hardware, forcing developers to compromise on technologies when building ML solutions. OpenXLA is a community-led and open-source ecosystem of ML compiler and infrastructure projects being co-developed by AI/ML leaders including Alibaba, Amazon Web Services, AMD, Arm, Apple, Google, Intel, Meta, NVIDIA, and more. It will address this challenge by letting ML developers build their models on leading frameworks and execute them with high performance across any hardware backend. This flexibility will let developers make the right choice for their project, rather than being locked into decisions by closed systems. Our community will start by collaboratively evolving the XLA compiler and StableHLO, a portable ML compute operation set that makes frameworks easier to deploy across different hardware options." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/H01.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/H01-thumb.png - title: "A Community-led and OSS Ecosystem of ML Compiler and Infrastructure Projects" - -- authors: - - Mao Lin - - Keren Zhou - - Penfei Su - categories: - - TOOLS - description: "Limited GPU memory resources can often hinder the performance of GPU-accelerated applications. While PyTorch’s Caching Allocator aims to minimize the number of expensive memory allocations and deallocations and maximize the efficient utilization of GPU memory resources, our study of common deep learning models revealed significant memory fragmentation problems. In some cases, up to 50% of GPU memory is wasted. To better understand the root causes of memory fragmentation, we developed a tool that visualizes GPU memory usage in two ways: the allocator view and the block view. The allocator view presents memory usage with each allocation or deallocation event, and the block view shows the changes in specific memory blocks over time. Our analysis revealed considerable potential to save GPU memory, which would relieve the bottleneck of limited resources. By employing strategies such as swapping, activation recomputation, and memory defragmentation, we were able to reduce GPU memory waste significantly."
- link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/F01.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/F01-thumb.png - title: "Squeezing GPU Memory Usage in PyTorch" - -- authors: - - Mohamed Masoud - - Farfalla Hu - - Sergey Plis - categories: - - TOOLS - description: "In brainchop project, we bring high fidelity pre-trained deep learning models for volumetric analysis of structural magnetic resonance imaging (MRI) right to the browsers of scientists and clinicians with no requirement on their technical skills in setting up AI-solutions. All of this in an extensible open-source framework. Our tool is the first front-end MRI segmentation tool on the web that supports full brain volumetric processing in a single pass inside a browser. This property is powered by our lightweight and reliable deep learning model Meshnet that enables volumetric processing of the entire brain at once, which leads to increased accuracy with modest computational requirements. High-quality client-side processing solves the privacy problem, as the data does not need to leave the client. Moreover, browser-based implementation is able to take advantage of available hardware acceleration regardless of the brand or architecture.\n GitHub: https://github.com/neuroneural/brainchop" - link: https://github.com/neuroneural/brainchop - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/F02.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/F02-thumb.png - title: "'Brainchop': In Browser MRI Volumetric Segmentation and Rendering" - -- authors: - - Xu Zhao - - Will Constable - - David Berard - - Taylor Robie - - Eric Han - - Adnan Aziz - categories: - - TOOLS - description: "Holding the line of performance is challenging for ML frameworks like PyTorch. The existing AI benchmarks like MLPerf are end-to-end, therefore require large volumes of datasets, at-scale GPU clusters, and long benchmarking time. We develop TorchBench, a novel AI benchmark suite which highlights with minimal data inputs, single GPU, and milliseconds-per-test latencies. TorchBench is now deployed as part of the PyTorch nightly release process, guarding performance/correctness regressions and testing experimental PyTorch features on SOTA machine learning models." - link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/F03.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/F03-thumb.png - title: "TorchBench: Quantifying PyTorch Performance During the Development Loop" - -- authors: - - Gustaf Ahdritz - - Sachin Kadyan - - Will Gerecke - - Luna Xia - - Nazim Bouatta - - Mohammed AlQuraishi - categories: - - TOOLS - description: "OpenFold, developed by Columbia University, is an open-source protein structure prediction model implemented with PyTorch. The goal of OpenFold is to verify that AlphaFold 2 — DeepMind's protein structure prediction model — can be reproduced from scratch and beyond that, make components of the system available to like-minded researchers and academics so they can build on top of it. During this research, Weights & Biases was used to accelerate OpenFold’s reproduction of AlphaFold 2. The collaborative nature of W&B allowed for insights to scale from a single researcher to the entire team and helped solve the reproducibility challenge in ML." 
- link: - poster_link: https://pytorch.s3.amazonaws.com/posters/ptc2022/F04.pdf - section: F8 - thumbnail_link: https://pytorch.org/assets/images/ptc2022/F04-thumb.png - title: "Democratizing AI for Biology With OpenFold" diff --git a/_data/ecosystem/ptdd/2021/posters.yaml b/_data/ecosystem/ptdd/2021/posters.yaml deleted file mode 100644 index a26264ec07f4..000000000000 --- a/_data/ecosystem/ptdd/2021/posters.yaml +++ /dev/null @@ -1,719 +0,0 @@ -- authors: - - Brian Hu - - Paul Tunison - - Elim Schenck - - Roddy Collins - - Anthony Hoogs - categories: - - MEDICAL & HEALTHCARE, RESPONSIBLE AI - description: "Despite significant progress in the past few years, machine learning-based systems are still often viewed as “black boxes,” which lack the ability to explain their output decisions to human users. Explainable artificial intelligence (XAI) attempts to help end-users understand and appropriately trust machine learning-based systems. One commonly used technique involves saliency maps, which are a form of visual explanation that reveals what an algorithm pays attention to during its decision process. We introduce the xaitk-saliency python package, an open-source, explainable AI framework and toolkit for visual saliency algorithm interfaces and implementations, built for analytics and autonomy applications. The framework is modular and easily extendable, with support for several image understanding tasks, including image classification, image similarity, and object detection. We have also recently added support for the autonomy domain, by creating saliency maps for pixel-based deep reinforcement-learning agents in environments such as ATARI. Several example notebooks are included that demo the current capabilities of the toolkit. xaitk-saliency will be of broad interest to anyone who wants to deploy AI capabilities in operational settings and needs to validate, characterize and trust AI performance across a wide range of real-world conditions and application areas using saliency maps. To learn more, please visit: https://github.com/XAITK/xaitk-saliency." - link: https://github.com/XAITK/xaitk-saliency - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F8.png - section: F8 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F8-thumb.png - title: "xaitk-saliency: Saliency built for analytics and autonomy applications" -# - authors: -# - Ali Hatamizadeh -# - Yucheng Tang -# - Vishwesh Nath -# - Dong Yang -# - Holger Roth -# - Bennett Landman -# - Daguang Xu -# categories: -# - MEDICAL & HEALTHCARE, RESPONSIBLE AI -# description: "A novel transformer-based architecture, dubbed UNETR, for semantic segmentation of volumetric medical images by reformulating this task as a 1D sequence-to-sequence prediction problem. Using a transformer encoder increases the model's ability to learn long-range dependencies and effectively captures global contextual representation at multiple scales. The effectiveness of UNETR has been validated on different volumetric segmentation tasks in CT and MRI modalities. UNETR achieves new state-of-the-art performance in both Standard and Free Competitions on the BTCV leaderboard for the multi-organ segmentation and outperforms competing approaches for brain tumor and spleen segmentation on the MSD dataset. 
UNETR has shown the potential to effectively learn the critical anatomical relationships represented in medical images." -# link: -# poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F7.png -# section: F7 -# thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F7-thumb.png -# title: "UNETR: Transformers for 3D Medical Image Segmentation" -- authors: - - Laila Rasmy - - Ziqian Xie - - Bingyu Mao - - Khush Patel - - Wanheng Zhang - - Degui Zhi - categories: - - MEDICAL & HEALTHCARE, RESPONSIBLE AI - description: "CovRNN is a collection of recurrent neural network (RNN)-based models to predict COVID-19 patients' outcomes, using their available electronic health record (EHR) data on admission, without the need for specific feature selection or missing data imputation. CovRNN is designed to predict three outcomes: in-hospital mortality, need for mechanical ventilation, and long length of stay (LOS >7 days). Predictions are made for time-to-event risk scores (survival prediction) and all-time risk scores (binary prediction). Our models were trained and validated using heterogeneous and de-identified data of 247,960 COVID-19 patients from 87 healthcare systems, derived from the Cerner® Real-World Dataset (CRWD) and 36,140 de-identified patients' data derived from the Optum® de-identified COVID-19 Electronic Health Record v. 1015 dataset (2007 - 2020). CovRNN shows higher performance than do traditional models. It achieved an area under the receiver operating characteristic curve (AUROC) of 93% for mortality and mechanical ventilation predictions on the CRWD test set (vs. 91.5% and 90% for the light gradient boosting machine (LGBM) and logistic regression (LR), respectively) and 86.5% for prediction of LOS > 7 days (vs. 81.7% and 80% for LGBM and LR, respectively). For survival prediction, CovRNN achieved a C-index of 86% for mortality and 92.6% for mechanical ventilation. External validation confirmed AUROCs in similar ranges. https://www.medrxiv.org/content/10.1101/2021.09.27.2126" - link: https://github.com/ZhiGroup/CovRNN - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F6.png - section: F6 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F6-thumb.png - title: "CovRNN—A collection of recurrent neural network models for predicting outcomes of COVID-19 patients using their EHR data" -- authors: - - Sanzhar Askaruly - - Nurbolat Aimakov - - Alisher Iskakov - - Hyewon Cho - - Yujin Ahn - - Myeong Hoon Choi - - Hyunmo Yang - - Woonggyu Jung - categories: - - MEDICAL & HEALTHCARE, RESPONSIBLE AI - description: "Deep learning has transformed many aspects of industrial pipelines recently. Scientists involved in biomedical imaging research are also benefiting from the power of AI to tackle complex challenges. Although the academic community has widely accepted image processing tools such as scikit-image and ImageJ, there is still a need for a tool which integrates deep learning into biomedical image analysis. We propose a minimal but convenient Python package based on PyTorch with common deep learning models, extended by flexible trainers and medical datasets. In this work, we also share a theoretical dive in the form of a course, as well as minimal tutorials for running Android applications containing models trained with Farabio."
- link: https://github.com/tuttelikz/farabio - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F5.png - section: F5 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F5-thumb.png - title: "Farabio - Deep learning for Biomedical Imaging" -- authors: - - Fernando PĂ©rez-GarcĂ­a - - Rachel Sparks - - SĂ©bastien Ourselin - categories: - - MEDICAL & HEALTHCARE, RESPONSIBLE AI - description: "Processing of medical images such as MRI or CT presents different challenges compared to RGB images typically used in computer vision: a lack of labels for large datasets, high computational costs, and the need of metadata to describe the physical properties of voxels. Data augmentation is used to artificially increase the size of the training datasets. Training with image patches decreases the need for computational power. Spatial metadata needs to be carefully taken into account in order to ensure a correct alignment and orientation of volumes. We present TorchIO, an open-source Python library to enable efficient loading, preprocessing, augmentation and patch-based sampling of medical images for deep learning. TorchIO follows the style of PyTorch and integrates standard medical image processing libraries to efficiently process images during training of neural networks. TorchIO transforms can be easily composed, reproduced, traced and extended. We provide multiple generic preprocessing and augmentation operations as well as simulation of MRI-specific artifacts.TorchIO was developed to help researchers standardize medical image processing pipelines and allow them to focus on the deep learning experiments. It encourages good open-science practices, as it supports experiment reproducibility and is version-controlled so that the software can be cited precisely. Due to its modularity, the library is compatible with other frameworks for deep learning with medical images." - link: https://github.com/fepegar/torchio/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F4.png - section: F4 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F4-thumb.png - title: "TorchIO: Pre-processing & Augmentation of Medical Images for Deep Learning Applications" -- authors: - - Michael Zephyr - - Prerna Dogra - - Richard Brown - - Wenqi Li - - Eric Kerfoot - categories: - - MEDICAL & HEALTHCARE, RESPONSIBLE AI - description: "Healthcare image analysis for both radiology and pathology is increasingly being addressed with deep-learning-based solutions. These applications have specific requirements to support various imaging modalities like MR, CT, ultrasound, digital pathology, etc. It is a substantial effort for researchers in the field to develop custom functionalities to handle these requirements. Consequently, there has been duplication of effort, and as a result, researchers have incompatible tools, which makes it hard to collaborate. MONAI stands for Medical Open Network for AI. Its mission is to accelerate the development of healthcare imaging solutions by providing domain-specialized building blocks and a common foundation for the community to converge in a native PyTorch paradigm." 
- link: https://monai.io/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F3.png - section: F3 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F3-thumb.png - title: "MONAI: A Domain Specialized Library for Healthcare Imaging" -- authors: - - Sahar Karimi - - Beliz Gokkaya - - Audrey Flower - - Ehsan Emamjomeh-Zadeh - - Adly Templeton - - Ilknur Kaynar Kabul - - Erik Meijer - categories: - - MEDICAL & HEALTHCARE, RESPONSIBLE AI - description: "We are presenting a framework for building Bayesian Neural Networks (BNN). One of the critical use cases of BNNs is uncertainty quantification of ML predictions in deep learning models. Uncertainty quantification leads to more robust and reliable ML systems that are often employed to prevent catastrophic outcomes of overconfident predictions especially in sensitive applications such as integrity, medical imaging and treatments, self driving cars, etc.. Our framework provides tools to build BNN models, estimate the uncertainty of their predictions, and transform existing models into their BNN counterparts. We discuss the building blocks and API of our framework along with a few examples and future directions." - link: - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F2.png - section: F2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F2-thumb.png - title: "A Framework for Bayesian Neural Networks" -# - authors: -# - Pranav Bhamidipati -# - Guruprasad Raghavan -# - Matt Thomson -# categories: -# - MEDICAL & HEALTHCARE, RESPONSIBLE AI -# description: "Biological tissues reliably grow into precise, functional structures from simple starting states during development. Throughout the developmental process, the energy of a tissue changes depending on its natural resistance to deformations such as stretching, bending, shearing, and torsion. In this paper, we represent tissue structures as shapes and develop a mathematical framework using PyTorch's autograd functionality and TorchVision to discover paths on the tissue shape manifold to minimize the total energy during development. We find that paths discovered by gradient descent and the geodesic algorithm outperform naive shape interpolation in energetic terms and resemble strategies observed in development. Broadly, these tools built on PyTorch frameworks can be used to understand and compare shape transformations in biology and propose optimal strategies for synthetic tissue engineering." -# link: -# poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F1.png -# section: F1 -# thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/F1-thumb.png -# title: "Traversing Geodesics to Grow Biological Structures" -# - authors: -# - Zhengyang Feng -# - Shaohua Guo -# categories: -# - AUDIO, IMAGE & VIDEO, VISION -# description: "PytorchAutoDrive is an open-source codebase to facilitate autonomous driving research, which focuses on autonomous driving perception tasks. Based on PyTorch and TorchVision, it provides a unified level of tricks for fair evaluation of different methods, beginner-friendly codes, visualization tools, and benchmarking of model speed/flops count. Currently, with PyTorch DDP and AMP, fast training of semantic segmentation and lane detection tasks are supported on 7 datasets, with 9 re-implemented methods. 
With help from the PyTorch developer community, we will support more methods and functionals in the future: https://github.com/voldemortX/pytorch-auto-drive" -# link: -# poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E10.png -# section: E10 -# thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E10-thumb.png -# title: "PytorchAutoDrive: Toolkit & Fair Benchmark for Autonomous Driving Research" -- authors: - - Philip Meier - - torchvision team - - torchdata team - categories: - - AUDIO, IMAGE & VIDEO, VISION - description: "torchvision provides a lot of image and video datasets as well as transformations for research and prototyping. In fact, the very first release of torchvision in 2016 was all about these two submodules. Since their inception, their scope has grown organically, becoming hard to maintain and sometimes also hard to use. Over the years we have gathered a lot of user feedback and decided to revamp the datasets and transforms. This poster will showcase the current state of the rework and compare it to the hopefully soon-to-be legacy API." - link: https://pytorchvideo.org/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E9-thumb.png - section: E9 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E9-thumb.png - title: "Revamp of torchvision datasets and transforms" -- authors: - - Wenwei Zhang - - Han Lyu - - Kai Chen - categories: - - AUDIO, IMAGE & VIDEO, VISION - description: "OpenMMLab builds open-source toolboxes for computer vision. It aims to 1) provide high-quality codebases to reduce the difficulties in algorithm reimplementation; 2) create efficient deployment toolchains targeting a variety of inference engines and devices; 3) build a solid foundation for the community to bridge the gap between academic research and industrial applications. Based on PyTorch, OpenMMLab develops MMCV to provide unified abstract interfaces and common utils, which serve as a foundation of the whole system. Since the initial release in October 2018, OpenMMLab has released 15+ toolboxes covering different research areas. It has implemented 200+ algorithms and released 1800+ pre-trained models. With tighter collaboration with the community, OpenMMLab will open source more toolboxes and full-stack toolchains in the future." - link: openmmlab.com - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E8.png - section: E8 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E8-thumb.png - title: "OpenMMLab: Open-Source Toolboxes for Artificial Intelligence" -# - authors: -# - Ayse Ayyuce Demirbas -# categories: -# - AUDIO, IMAGE & VIDEO, VISION -# description: "A Generative Adversarial Network (GAN) is a powerful architecture to generate realistic images. In this work, we generate new lung adenocarcinoma tissue images with a GAN using the Lung and Colon Cancer Histopathological Images dataset. Additionally, we propose two convolutional neural network models for the generator and discriminator."
-# link: -# poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E7.png -# section: E7 -# thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E7-thumb.png -# title: "Generation of Synthetic Lung Cancer Histopathological Images using Generative Adversarial Networks" -# - authors: -# - Haoqi Fan* -# - Tullie Murrell* -# - Heng Wang† -# - Kalyan Vasudev Alwala† -# - Yanghao Li† -# - Yilei Li† -# - Bo Xiong† -# - Nikhila Ravi -# - Meng Li -# - Haichuan Yang -# - Jitendra Malik -# - Ross Girshick -# - Matt Feiszli -# - Aaron Adcock‡ -# - Wan-Yen Lo‡ -# - Christoph Feichtenhofer‡ -# categories: -# - AUDIO, IMAGE & VIDEO, VISION -# description: "We introduce PyTorchVideo, an open-source deep-learning library that provides a rich set of modular, efficient, and reproducible components for a variety of video understanding tasks, including classification, detection, self-supervised learning, and low-level processing. The library covers a full stack of video understanding tools including multimodal data loading, transformations, and models that reproduce state-of-the-art performance. PyTorchVideo further supports hardware acceleration that enables real-time inference on mobile devices. The library is based on PyTorch and can be used by any training framework; for example, PyTorchLightning, PySlowFast, or Classy Vision. PyTorchVideo is available at https://pytorchvideo.org/." -# link: https://pytorchvideo.org/ -# poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E6.png -# section: E6 -# thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E6-thumb.png -# title: "PyTorchVideo - A Deep Learning Library for Video Understanding" -- authors: - - Siddha Ganju - - Sayak Paul - categories: - - AUDIO, IMAGE & VIDEO, VISION - description: "Floods wreak havoc throughout the world, causing billions of dollars in damages, and uprooting communities, ecosystems and economies. Aligning flood extent mapping with local topography can provide a plan-of-action that the disaster response team can consider. Thus, remote flood level estimation via satellites like Sentinel-1 can prove to be remedial. The Emerging Techniques in Computational Intelligence (ETCI) competition on Flood Detection tasked participants with predicting flooded pixels after training with synthetic aperture radar (SAR) images in a supervised setting. We use a cyclical approach involving two stages (1) training an ensemble model of multiple UNet architectures with available high and low confidence labeled data and, generating pseudo labels or low confidence labels on the entire unlabeled test dataset, and then, (2) filter out quality generated labels and, (3) combining the generated labels with the previously available high confidence labeled dataset. This assimilated dataset is used for the next round of training ensemble models. This cyclical process is repeated until the performance improvement plateaus. Additionally, we post-process our results with Conditional Random Fields. Our approach sets the second-highest score on the public hold-out test leaderboard for the ETCI competition with 0.7654 IoU. To the best of our knowledge we believe this is one of the first works to try out semi-supervised learning to improve flood segmentation models." 
- link: https://github.com/sidgan/ETCI-2021-Competition-on-FLood-Detection - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E5.png - section: E5 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E5-thumb.png - title: "Flood Segmentation on Sentinel-1 SAR Imagery with Semi-Supervised Learning" -# - authors: -# - Kevin Zakka -# - Andy Zeng -# - Pete Florence -# - Jonathan Tompson -# - Jeannette Bohg -# - Debidatta Dwibedi -# categories: -# - AUDIO, IMAGE & VIDEO, VISION -# description: "We investigate the visual cross-embodiment imitation setting, in which agents learn policies from videos of other agents (such as humans) demonstrating the same task, but with stark differences in their embodiments -- end-effector shape, actions, etc. In this work, we demonstrate that it is possible to automatically discover and learn vision-based reward functions from cross-embodiment demonstration videos that are robust to these differences. Specifically, we present a self-supervised method for Cross-embodiment Inverse Reinforcement Learning (XIRL) that leverages temporal cycle-consistency constraints to learn deep visual embeddings that capture task progression from offline videos of demonstrations across multiple expert agents, each performing the same task differently due to embodiment differences. We show empirically that if the embeddings are aware of task progress, simply taking the negative distance between the current state and goal state in the learned embedding space is useful as a reward for training policies with reinforcement learning. We find our learned reward function not only works for embodiments seen during training, but also generalizes to entirely new embodiments. Additionally, when transferring real-world human demonstrations to a simulated robot, we find that XIRL is more sample efficient than current best methods." -# link: https://x-irl.github.io/ -# poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E4.png -# section: E4 -# thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E4-thumb.png -# title: "XIRL: Cross-embodiment Inverse Reinforcement Learning" -- authors: - - Xiaoyu Liu - - James Wagner - - Roy Fejgin - - Joan Serra - - Santiago Pascual - - Cong Zhou - - Jordi Pons - - Vivek Kumar - categories: - - AUDIO, IMAGE & VIDEO, VISION - description: "Speech enhancement is a fundamental audio processing task that has experienced a radical change with the advent of deep learning technologies. We will overview the main characteristics of the task and the key principles of existing deep learning solutions. We will be presenting the past and present work done by our group with the overall goal of delivering the best possible intelligibility and sound quality. Finally, we will provide our view on the future of speech enhancement and show how our current long-term research aligns with such a view." - link: - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E3.png - section: E3 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E3-thumb.png - title: "Real time Speech Enhancement" -- authors: - - Edgar Riba - - Dmytro Mishkin - - Jian Shi - - Luis Ferraz - categories: - - AUDIO, IMAGE & VIDEO, VISION - description: "Kornia is a differentiable library that allows classical computer vision to be integrated into deep learning models. It consists of a set of routines and differentiable modules to solve generic computer vision problems. 
At its core, the package uses PyTorch as its main backend both for efficiency and to take advantage of the reverse-mode auto-differentiation to define and compute the gradient of complex functions." - link: https://kornia.github.io// - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E2.png - section: E2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E2-thumb.png - title: "Kornia AI: Low Level Computer Vision for AI" -- authors: - - Daniel Neimark - - Omri Bar - - Maya Zohar - - Dotan Asselmann - categories: - - AUDIO, IMAGE & VIDEO, VISION - description: "This paper presents VTN, a transformer-based framework for video recognition. Inspired by recent developments in vision transformers, we ditch the standard approach in video action recognition that relies on 3D ConvNets and introduce a method that classifies actions by attending to the entire video sequence information. Our approach is generic and builds on top of any given 2D spatial network. In terms of wall runtime, it trains 16.1× faster and runs 5.1× faster during inference while maintaining competitive accuracy compared to other state-of-the-art methods. It enables whole video analysis, via a single end-to-end pass, while requiring 1.5× fewer GFLOPs. We report competitive results on Kinetics-400 and present an ablation study of VTN properties and the trade-off between accuracy and inference speed. We hope our approach will serve as a new baseline and start a fresh line of research in the video recognition domain. Code and models are available at: https://github.com/bomri/SlowFast/blob/master/projects/vtn/README.md . See paper: https://arxiv.org/abs/2102.00719" - link: https://github.com/bomri/SlowFast/blob/master/projects/vtn/README.md - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E1.png - section: E1 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/E1-thumb.png - title: "Video Transformer Network" -- authors: - - Dr. Ehsan Saboori - - Dr. Sudhakar Sah - - MohammadHossein AskariHemmat Saad Ashfaq - - Alex Hoffman - - Olivier Mastropietro - - Davis Sawyer - categories: - - PERFORMANCE, PRODUCTION & DEPLOYMENT - description: "The emergence of Deep Neural Networks (DNNs) on embedded and low-end devices holds tremendous potential to expand the adoption of AI technologies to wider audiences. However, making DNNs applicable for inference on such devices using techniques such as quantization and model compression, while maintaining model accuracy, remains a challenge for production deployment. Furthermore, there is a lack of inference engines available in any AI framework to run such low precision networks. Our work presents a novel inference engine and model compression framework that automatically enables PyTorch developers to quantize and run their deep learning models at 2bit and 1bit precision, making them faster, smaller and more energy-efficient in production. DLRT empowers PyTorch developers to unlock advanced AI on low-power CPUs, starting with ARM CPUs and MCUs. This work allows AI researchers and practitioners to achieve 10x faster inference and near-GPU level performance on a fraction of the power and cost." 
- link: https://github.com/deeplite - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/D7.png - section: D7 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/D7-thumb.png - title: "DLRT: Ultra Low-Bit Precision Inference Engine for PyTorch on CPU" -- authors: - - Adway Dhillo - - Nidhin Pattaniyil - categories: - - PERFORMANCE, PRODUCTION & DEPLOYMENT - description: "This poster is for a data scientist or ML engineer looking to productionize their PyTorch models. It will cover post-training steps that should be taken to optimize the model, such as quantization and TorchScript. It will also walk the user through packaging and serving the model with Facebook’s TorchServe. It will also cover the benefits of script mode and the PyTorch JIT. Benefits of TorchServe: high-performance serving, multi-model serving, model versioning for A/B testing, server-side batching, and support for pre- and post-processing." - link: https://pytorch.org/serve/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/D6.png - section: D6 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/D6-thumb.png - title: "Serving PyTorch Models in Production at Walmart Search" -- authors: - - Shengyi Huang - - Rousslan Fernand Julien Dossa - - Chang Ye - - Jeff Braga - categories: - - PERFORMANCE, PRODUCTION & DEPLOYMENT - description: "CleanRL is an open-source library that provides high-quality single-file implementations of Deep Reinforcement Learning algorithms. It provides a simpler yet scalable development experience by having a straightforward codebase and integrating production tools to help interact with and scale experiments. In CleanRL, we put all details of an algorithm into a single file, making these performance-relevant details easier to recognize. Additionally, an experiment tracking feature is available to help log metrics, hyperparameters, videos of an agent's gameplay, dependencies, and more to the cloud. Despite succinct implementations, we have also designed tools to help scale, at one point orchestrating experiments on more than 2000 machines simultaneously via Docker and cloud providers. The source code can be found at https://github.com/vwxyzjn/cleanrl." - link: https://github.com/vwxyzjn/cleanrl/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/D5.png - section: D5 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/D5-thumb.png - title: "CleanRL: high-quality single file implementation of Deep Reinforcement Learning algorithms with research-friendly features" -- authors: - - Nidhin Pattaniyil - - Reshama Shaikh - categories: - - PERFORMANCE, PRODUCTION & DEPLOYMENT - description: "As technology improves, so does the use of deep learning models. Additionally, since the time spent on mobile devices is greater than on desktop, the demand for applications running natively on mobile devices is also high. This demo will go through a complete example of training a deep learning vision classifier on the Food-101 dataset using PyTorch. We then deploy it on web and mobile using TorchServe and PyTorch Mobile."
- link: https://github.com/npatta01/pytorch-food - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/D4.png - section: D4 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/D4-thumb.png - title: "Deploying a Food Classifier on PyTorch Mobile" -# - authors: -# - James McCaffrey -# - Ricky Loynd -# - Amanda Minnich -# - Bryan Xia -# categories: -# - PERFORMANCE, PRODUCTION & DEPLOYMENT -# description: "Anomaly detection using deep neural autoencoder reconstruction error is a well-known and effective technique. Reconstruction error anomaly detection compares X and reconstructed X, and when they differ greatly, X is likely anomalous in some way. Recent research has explored an evolution of the autoencoder reconstruction error technique, called variational autoencoder (VAE) reconstruction probability. Briefly, a source data item X generates an internal (u1, v1) mean and standard deviation (equivalently, variance or log-variance) which define a probability distribution P. The P(u1, v1) distribution determines a Q(u2, v2) distribution which is sampled to generate a reconstructed X. The VAE reconstruction probability technique determines how likely it is that the source item X came from the Q(u2, v2) distribution, and if the likelihood is small, X is tagged as anomalous. Experiments on synthetic datasets suggest that the autoencoder reconstruction error and VAE reconstruction probability techniques identify different types of anomalous data items." -# link: -# poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/D3.png -# section: D3 -# thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/D3-thumb.png -# title: "Variational Autoencoder Reconstruction Probability Anomaly Detection" -- authors: - - Naren Dasan - - Nick Comly - - Dheeraj Peri - - Anurag Dixit - - Abhiram Iyer - - Bo Wang - - Arvind Sridhar - - Boris Fomitchev - - Josh Park - categories: - - PERFORMANCE, PRODUCTION & DEPLOYMENT - description: "Learn how to accelerate PyTorch inference, from framework, for model deployment. The PyTorch integration for TensorRT makes the performance of TensorRT's GPU optimizations available in PyTorch for any model. We will walk you through how with 3 lines of code you can go from a trained model to optimized TensorRT-embedded TorchScript, ready to deploy to a production environment." - link: https://github.com/NVIDIA/Torch-TensorRT/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/D2.png - section: D2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/D2-thumb.png - title: "Torch-TensorRT: Accelerating Inference Performance Directly from PyTorch using TensorRT" -- authors: - - Jean Kossaifi - categories: - - PERFORMANCE, PRODUCTION & DEPLOYMENT - description: "Most of the data in modern machine learning (e.g. fMRI, videos, etc) is inherently multi-dimensional and leveraging that structure is crucial for good performance. Tensor methods are the natural way to achieve this and can improve deep learning and enable i) large compression ratios through a reduction of the number of parameters, ii) computational speedups, iii) improved performance and iv) better robustness. The TensorLy project provides the tools to manipulate tensors, including tensor algebra, regression and decomposition. 
TensorLy-Torch builds on top of this and enables tensor-based deep learning by providing out-of-the-box tensor based PyTorch layers that can be readily combined with any deep neural network architecture and takes care of things such as initialization and tensor dropout." - link: http://tensorly.org/quantum - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/D1.png - section: D1 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/D1-thumb.png - title: "Tensorized Deep Learning with TensorLy-Torch" -- authors: - - Sergey Kolesnikov - categories: - - EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING - description: "Catalyst is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop." - link: https://catalyst-team.com/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C12.png - section: C12 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C12-thumb.png - title: "Catalyst-Accelerated Deep Learning R&D" -- authors: - - Amog Kamsetty - - Richard Liaw - - Will Drevo - - Michael Galarnyk - categories: - - EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING - description: "PyTorch Lightning is a library that provides a high-level interface for PyTorch which helps you organize your code and reduce boilerplate. By abstracting away engineering code, it makes deep learning experiments easier to reproduce and improves developer productivity. PyTorch Lightning also includes plugins to easily parallelize your training across multiple GPUs. This parallel training, however, depends on a critical assumption: that you already have your GPU(s) set up and networked together in an efficient way for training. While you may have a managed cluster like SLURM for multi-node training on the cloud, setting up the cluster and its configuration is no easy task. Ray Lightning was created with this problem in mind to make it easy to leverage multi-node training without needing extensive infrastructure expertise. It is a simple and free plugin for PyTorch Lightning with a number of benefits like simple setup, easy scale up, seamless creation of multi-node clusters on AWS/Azure/GCP via the Ray Cluster Launcher, and an integration with Ray Tune for large-scale distributed hyperparameter search and state of the art algorithms" - link: https://github.com/ray-project/ray_lightning - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C11.png - section: C11 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C11-thumb.png - title: "Ray Lightning: Easy Multi-node PyTorch Lightning training" -- authors: - - Jin Howe Teo - - Way Yen Chen - - Najib Ninaba - - Choo Heng Chong Mark - categories: - - EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING - description: "Data sits as the centerpiece of any machine learning endeavour, yet in many real-world projects, a single party’s data is often insufficient and needs to be augmented with data from other sources. This is unfortunately easier said than done, as there are many innate concerns (be it regulatory, ethical, commercial etc.) stopping parties from exchanging data. Fortunately, there exists an emerging privacy-preserving machine learning technology called Federated Learning. 
It enables multiple parties holding local data to collaboratively train machine learning models without actually exchanging their data with one another, hence preserving the confidentiality of different parties' local data. Today, we will be showcasing Synergos, a distributed platform built here at AI Singapore to facilitate the adoption of Federated Learning. Specifically, it strives to make the complex mechanisms involved in any federated endeavour simple, accessible and sustainable." - link: https://github.com/aimakerspace/synergos_simulator - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C10.png - section: C10 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C10-thumb.png - title: "Supercharge your Federated Learning with Synergos" -- authors: - - Aurick Qiao - - Omkar Pangarkar - - Richard Fan - categories: - - EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING - description: "AdaptDL is an open-source framework and scheduling algorithm that directly optimizes cluster-wide training performance and resource utilization. By elastically re-scaling jobs, co-adapting batch sizes and learning rates, and avoiding network interference, AdaptDL improves shared-cluster training compared with alternative schedulers. AdaptDL can automatically determine the optimal number of resources given a job’s need. It will efficiently add or remove resources dynamically to ensure the highest level of performance. The AdaptDL scheduler will automatically figure out the most efficient number of GPUs to allocate to your job, based on its scalability. When the cluster load is low, your job can dynamically expand to take advantage of more GPUs. AdaptDL offers an easy-to-use API to make existing PyTorch training code elastic with adaptive batch sizes and learning rates. We have also ported AdaptDL to Ray/Tune, which can automatically scale trials of an Experiment and can be used to schedule stand-alone PyTorch training jobs on the cloud in a cost-effective way." - link: https://github.com/petuum/adaptdl - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C9.png - section: C9 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C9-thumb.png - title: "AdaptDL: An Open-Source Resource-Adaptive Deep Learning Training and Scheduling Framework" -- authors: - - Vasiliy Kuznetsov - - James Reed - - Jerry Zhang - categories: - - EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING - description: "Describes a prototype PyTorch workflow to perform quantization syntax transforms in Eager mode with: * no model changes needed (compared to Eager mode which requires manual quant/dequant insertion and fusion) * almost no model syntax restrictions (compared to FX graph mode which requires symbolic traceability)" - link: https://pytorch.org/docs/stable/quantization.html - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C8.png - section: C8 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C8-thumb.png - title: "Define-by-run quantization" -- authors: - - Charles Hernandez - - Vasiliy Kuznetzov - - Haixin Liu - categories: - - EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING - description: "Quantization goes wrong when it doesn't satisfy the accuracy we expect. Debugging the accuracy issue of quantization is not easy and time-consuming.
The Fx Numeric Suite Core APIs allow users to better diagnose the source of their quantization error for both statically and dynamically quantized models. This poster gives an overview of the core APIs and techniques available to users through the Fx Numeric Suite, and how they can use them to improve quantization performance." - link: - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C7.png - section: C7 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C7-thumb.png - title: "Fx Numeric Suite Core APIs" -- authors: - - J.K. Eshraghian - - M. Ward - - E.O. Neftci - - G. Lenz - - X. Wang - - G. Dwivedi - - M. Bennamoun - - D.S. Jeong - - W.D. Lu - categories: - - EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING - description: "The brain is the perfect place to look for inspiration to develop more efficient neural networks. One of the main differences with modern deep learning is that the brain encodes and processes information as spikes rather than continuous activations. Combining the training methods intended for neural networks with the sparse, spiking activity inspired by biological neurons has shown the potential to improve the power efficiency of training and inference by several orders of magnitude. snnTorch is a Python package for performing gradient-based learning with spiking neural networks. It extends the capabilities of PyTorch, taking advantage of its GPU-accelerated tensor computation and applying it to networks of event-driven spiking neurons. snnTorch is designed to be intuitively used with PyTorch, as though each spiking neuron were simply another activation in a sequence of layers. It is therefore agnostic to fully-connected layers, convolutional layers, residual connections, etc. The classical challenges that have faced the neuromorphic engineering community, such as the non-differentiability of spikes, the dead neuron problem, and vanishing gradients in backpropagation-through-time, are effectively solved in snnTorch and enable the user to focus on building applications that leverage sparsity and event-driven data streams." - link: https://snntorch.readthedocs.io/en/latest/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C6.png - section: C6 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C6-thumb.png - title: "snnTorch: Training spiking neural networks using gradient-based optimization" -- authors: - - Daniel Falbel - categories: - - EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING - description: "Last year, the PyTorch for the R language project was released, allowing R users to benefit from PyTorch's speed and flexibility. Since then we have had a growing community of contributors that are improving the torch for R interface, building research and products on top of it, and using it to teach deep learning methods. In this poster we will showcase the past and current developments in the PyTorch for R project, as well as our plans for the future."
- link: https://torch.mlverse.org/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C5.png - section: C5 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C5-thumb.png - title: "PyTorch for R" -- authors: - - Laurent Mazare - categories: - - EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING - description: "The main front-end for using PyTorch is its Python API, however LibTorch provides a lower level C++ API to manipulate tensors, perform automatic differentiation, etc. ocaml-torch and tch-rs are two open-source projects providing wrappers for this C++ API respectively in OCaml and Rust. Users can then write OCaml and Rust code to create new models, perform inference and training, and benefit from the guarantees provided by strongly typed programming languages and functional programming. They can also use TorchScript to leverage existing Python models. The libraries provide various examples, ranging from the main computer vision models to a minimalist GPT implementation. - The main challenges for these bindings are to provide idiomatic APIs adapted to the languages specificities; to automatically generate most of the bindings code as there are thousands of C++ functions to expose; and to interact properly with the memory models for each language." - link: https://github.com/laurentMazare/ocaml-torch - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C4.png - section: C4 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C4-thumb.png - title: "ocaml-torch and tch-rs: writing and using PyTorch models using OCaml or Rust" -- authors: - - Ari Bornstein - categories: - - EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING - description: "Flash is a high-level deep learning framework for fast prototyping, baselining, finetuning and solving deep learning problems. It features a set of tasks for you to use for inference and finetuning out of the box, and an easy to implement API to customize every step of the process for full flexibility. Flash is built for beginners with a simple API that requires very little deep learning background, and for data scientists, Kagglers, applied ML practitioners and deep learning researchers that want a quick way to get a deep learning baseline with advanced features PyTorch Lightning offers. Flash enables you to easily configure and run complex AI recipes for over 15 tasks across 7 data domains" - link: https://github.com/PyTorchLightning - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C3.png - section: C3 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C3-thumb.png - title: "PyTorch Lightning Flash - Your PyTorch AI Factory" -- authors: - - Victor Fomin - - Taras Savchyn - - Priyansi - categories: - - EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING - description: "PyTorch-Ignite is a high-level library to help with training and evaluating neural networks in PyTorch flexibly and transparently. PyTorch-Ignite is designed to be at the crossroads of high-level Plug & Play features and under-the-hood expansion possibilities. The tool aims to improve the deep learning community's technical skills by promoting best practices where things are not hidden behind a divine tool that does everything, but remain within the reach of users. 
PyTorch-Ignite differs from other similar tools by allowing users to compose their applications without being focused on a super multi-purpose object, but rather on weakly coupled components allowing advanced customization." - link: https://pytorch-ignite.ai/ecosystem/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C2.png - section: C2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C2-thumb.png - title: "PyTorch-Ignite: Training and evaluating neural networks flexibly and transparently" -- authors: - - Albert Jimenez - - Mohamed Akrout - categories: - - EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING - description: "Backpropagation is the default algorithm for training deep neural networks due to its simplicity, efficiency and high convergence rate. However, its requirements make it impossible to implement in a human brain. In recent years, more biologically plausible learning methods have been proposed. Some of these methods can match backpropagation accuracy, and simultaneously provide other extra benefits such as faster training on specialized hardware (e.g., ASICs) or higher robustness against adversarial attacks. While the interest in the field is growing, there is a necessity for open-source libraries and toolkits to foster research and benchmark algorithms. In this poster, we present BioTorch, a software framework to create, train, and benchmark biologically motivated neural networks. In addition, we investigate the performance of several feedback alignment methods proposed in the literature, thereby unveiling the importance of the forward and backward weight initialization and optimizer choice. Finally, we provide a novel robustness study of these methods against state-of-the-art white and black-box adversarial attacks." - link: https://github.com/jsalbert/biotorch - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C1.png - section: C1 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/C1-thumb.png - title: "Benchmarking the Accuracy and Robustness of Feedback Alignment Methods" -- authors: - - Ludovic Denoyer - - Alfredo de la Fuente - - Song Duong - - Jean-Baptiste Gaya - - Pierre-Alexandre Kamienny - - Daniel H. Thompson - categories: - - ML Ops, MODELS, MODEL OPTIMIZATION & INTERPRETABILITY - description: "salina is a lightweight library extending PyTorch modules for the development of sequential decision models. It can be used for Reinforcement Learning (including model-based with differentiable environments, multi-agent RL, ...), but also in supervised/unsupervised learning settings (for instance NLP, Computer Vision, etc.)." - link: https://github.com/facebookresearch/salina - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/B7.png - section: B7 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/B7-thumb.png - title: "Salina: Easy programming of Sequential Decision Learning and Reinforcement Learning Models in pytorch" -- authors: - - Zafar Takhirov - - Karen Zhou - - Raghuraman Krishnamoorthi - categories: - - ML Ops, MODELS, MODEL OPTIMIZATION & INTERPRETABILITY - description: "Two new toolflows for model pruning are introduced: Sparsifier and Pruner, which enable unstructured and structured pruning of the model weights respectively. These toolflows can be combined with other optimization techniques, such as quantization, to achieve even higher levels of model compression.
In addition to that, the \"Pruner\" toolflow can also be used for \"shape propagation\", where the physical structure of the model is modified after structured pruning (in FX graph mode only). This poster gives a high-level overview of the prototype API, a usage example, and the currently supported sparse quantized kernels, and provides a brief overview of future plans." - link: https://github.com/pytorch/pytorch - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/B6.png - section: B6 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/B6-thumb.png - title: "Structured and Unstructured Pruning Workflow in PyTorch" -- authors: - - François-Guillaume Fernandez - categories: - - ML Ops, MODELS, MODEL OPTIMIZATION & INTERPRETABILITY - description: "One of the core inconveniences of Deep Learning is its limited interpretability, which remains obscure for most non-basic convolutional models. Their performance is achieved by optimization processes that have high degrees of freedom and no constraints on explainability. Fortunately, modern frameworks' mechanisms grant access to the information flow in their components, which has paved the way to building intuition around result interpretability in CNN models. The main contributions of the author are described as follows: - - building a flexible framework for class activation computation - - providing high-quality implementations of most popular methods - - making these methods usable by entry-level users as well as researchers - The open-source project is available here: https://github.com/frgfm/torch-cam" - link: https://github.com/frgfm/torch-cam - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/B5.png - section: B5 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/B5-thumb.png - title: "Torch-CAM: class activation explorer" -- authors: - - Nikolaos Zioulis - categories: - - ML Ops, MODELS, MODEL OPTIMIZATION & INTERPRETABILITY - description: moai is a PyTorch-based AI Model Development Kit (MDK) that seeks to improve data-driven model workflows, design and understanding. It relies on hydra for handling configuration and lightning for handling infrastructure. As a kit, it offers a set of actions to `train` or `evaluate` models, with each action consuming configuration files. Apart from the definition of the model, data, training scheme, optimizer, visualization and logging, these configuration files additionally use named tensors to define tensor processing graphs. These are created by chaining various building blocks called monads, which are functional units or otherwise single responsibility modules. Monad parameters and input/output tensors are defined in the configuration file, allowing for the entire model to be summarized into a single file. This opens up novel functionalities like querying for inter-model differences using the `diff` action, or aggregating the results of multiple models using the `plot` action, which uses hiplot to compare models in various ways. moai facilitates high quality reproduction (using the `reprod` action), as apart from automatically handling all boilerplate related to it, it standardizes the process of developing modules/monads and implicitly logs all hyperparameters. Even though no code is required, moai exploits python’s flexibility to allow developers to integrate their own code into its engine from external projects, vastly increasing their productivity.
- link: https://github.com/ai-in-motion/moai - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/B4.png - section: B4 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/B4-thumb.png - title: "moai: A Model Development Kit to Accelerate Data-driven Workflows" -- authors: - - Vaibhav Singh - - Rajesh Thallam - - Jordan Totten - - Karl Weinmeister - categories: - - ML Ops, MODELS, MODEL OPTIMIZATION & INTERPRETABILITY - description: Machine Learning Operationalization has rapidly evolved in the last few years with a growing set of tools for each phase of development. From experimentation to automated model analysis and deployment, each of these tools offers some unique capabilities. In this work we survey a slice of these tools and demonstrate an opinionated example of an end-to-end CI/CD pipeline for PyTorch model development and deployment using the Vertex AI SDK. The goal of this session is to aid an informed conversation on the choices available to PyTorch industry practitioners who are looking to operationalize their ML models, and to researchers who are simply trying to organize their experiments. Although our implementation example will make tool choices at various stages, we will be focused on ML design patterns that are applicable to a wide variety of commercial and open-source offerings. - link: https://github.com/GoogleCloudPlatform/vertex-ai-samples - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/B3.png - section: B3 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/B3-thumb.png - title: "Building Production ML Pipelines for PyTorch Models" -- authors: - - George Hosu - - Patricio Cerda-Mardini - - Natasha Seelam - - Jorge Torres - categories: - - ML Ops, MODELS, MODEL OPTIMIZATION & INTERPRETABILITY - description: Nearly 64% of companies take over a month to a year to deploy a single machine learning (ML) model into production [1]. Many of these companies cite key challenges integrating with complex ML frameworks as a root cause [1], as there is still a gap between where data lives, how models are trained, and how downstream applications access predictions from models [1, 2]. MindsDB is a PyTorch-based ML platform that aims to solve fundamental MLOps challenges by abstracting ML models as “virtual tables”, allowing models to be queried in the same natural way users work with data in databases. As data is diverse and varied, we recently developed an open-source declarative syntax, named “JSON-AI”, to allow others to customize ML model internals without changing source code. We believe that the key elements of the data science (DS)/ML pipeline, namely data pre-processing/cleaning, feature engineering, and model-building [2], should be automated in a robust, reliable, and reproducible manner with simplicity. JSON-AI gives you refined control over each of these steps, and enables users to bring custom routines into their ML pipeline. In our poster, we will show how a user interfaces with JSON-AI to bring original approaches to each of the aforementioned parts of the DS/ML pipeline, along with control over analysis and explainability tools. [1] Algorithmia (2021).
2021 state of enterprise machine learning [2] “How Much Automation Does a Data Scientist Want?” arXiv (2021) - link: https://github.com/mindsdb/mindsdb/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/B2.png - section: B2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/B2-thumb.png - title: "Customizing MLOps pipelines with JSON-AI: a declarative syntax to streamline ML in the database" -# - authors: -# - Moses Gurtmann -# - Erez Schnaiderl -# categories: -# - ML Ops, MODELS, MODEL OPTIMIZATION & INTERPRETABILITY -# description: "Both from sanity considerations and the productivity perspective, Data Scientists, ML engineers, Graduate students, and other research-facing roles are all starting to adopt best-practices from production-grade MLOps. However, most toolchains come with a hefty price of extra code and maintenance, which reduces the actual time available for R&D. We will show an alternative approach using ClearML, the open-source MLOps solution. In this “best-practices” poster, we will overview the “must-haves” of R&D-MLOps: Orchestration, Automation, and Reproducibility. These enable easy remote execution through magically reproducible setups and even custom, reusable, bottom-up pipelines. We will take a single example and schematically transform it from the “as downloaded from GitHub” stage to a fully-fledged, scalable, version-controlled, parameterizable R&D pipeline. We will measure the number of changes needed to the codebase and provide evidence of real low-cost integration. All code, logs, and metrics will be available as supporting information." -# link: -# poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/B1.png -# section: B1 -# thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/B1-thumb.png -# title: "The Fundamentals of MLOps for R&D: Orchestration, Automation, Reproducibility" -- authors: - - Robin Lobel - categories: - - ACCELERATORS, TOOLS, LIBRARY, DATA - description: TorchStudio is an open-source, full-featured IDE for PyTorch. It aims to simplify the creation, training and iteration of AI models. It can load, analyze and explore datasets from the TorchVision or TorchAudio categories, or custom datasets with any format and number of inputs and outputs. TorchVision, TorchAudio or custom models can then be loaded or written from scratch, debugged, visualized as a graph, and trained using local hardware, a remote server or GPUs in the cloud. Trainings can then be compared in the dashboard with several analysis tools to help you identify the best-performing set of models and hyperparameters, and export them as TorchScript or ONNX files. TorchStudio is also highly customizable, with 90% of its functionalities accessible as open source scripts and independent modules, to fit as many AI scenarios as possible.
- link: https://torchstudio.ai/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A10.png - section: A10 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A10-thumb.png - title: "TorchStudio, a full-featured IDE for PyTorch" -- authors: - - Mark Saroufim - - Hamid Shojanazeri - - Patrick Hu - - Geeta Chauhan - - Jing Xu - - Jianan Gu - - Jiong Gong - - Ashok Emani - - Eikan Wang - - Min Jean Cho - - Fan Zhao - categories: - - ACCELERATORS, TOOLS, LIBRARY, DATA - description: "Accelerate TorchServe with Intel® Extension for PyTorch: Intel is collaborating with Meta to take advantage of the performance boost from Intel® Extension for PyTorch* in TorchServe, so that users can easily deploy their PyTorch models with satisfying out-of-the-box performance. With these SW advancements, we demonstrated the ease-of-use IPEX user-facing API, and we also showcased the speed-up with Intel® Extension for PyTorch* FP32 inference with the stock PyTorch and the speed-up with Intel® Extension for PyTorch* INT8 inference with the stock PyTorch." - link: www.intel.com/Performanceindex - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A9.png - section: A9 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A9-thumb.png - title: "Accelerate TorchServe with Intel Extension for PyTorch" -# - authors: -# - Isaac Godfried -# - Anton Polishko -# categories: -# - ACCELERATORS, TOOLS, LIBRARY, DATA -# description: Flow Forecast is a multi-purpose open-source deep learning framework for time series forecasting, classification, and anomaly detection, built in PyTorch. Flow Forecast utilizes modular code design, unit/integration tests, model/prediction visualizations, and native cloud provider integration in order to allow researchers to rapidly experiment with new model architectures, benchmark their results on popular datasets and reproduce their results. Simultaneously it aids industry data scientists in deploying models to production, periodically retraining models, and explaining model decisions to stakeholders through easy to use APIs, out of the box interpretability methods (e.g. SHAP), and model deployment support. Flow Forecast supports a broad variety of deep time series models such as LSTMs, GRUs, Transformers, and GNNs. It also features easy multitask learning support and loaders to help with geo-spatial-temporal data. -# link: https://github.com/AIStream-Peelout/flow-forecast -# poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A8.png -# section: A8 -# thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A8-thumb.png -# title: "Flow Forecast: A deep learning framework for time series forecasting, classification, and anomaly detection" -# - authors: -# - Patrick Kidger -# categories: -# - ACCELERATORS, TOOLS, LIBRARY, DATA -# description: "Turn this: -# ``` -# def batch_outer_product(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: -# # x has shape (batch, x_channels) -# # y has shape (batch, y_channels) -# # return has shape (batch, x_channels, y_channels) - -# return x.unsqueeze(-1) * y.unsqueeze(-2) -# ``` -# into this: -# ``` -# def batch_outer_product(x: TensorType[\"\"batch\"\", \"\"x_channels\"\"], -# y: TensorType[\"\"batch\"\", \"\"y_channels\"\"] -# ) -> TensorType[\"\"batch\"\", \"\"x_channels\"\", \"\"y_channels\"\"]: - -# return x.unsqueeze(-1) * y.unsqueeze(-2) -# ``` -# with programmatic checking that the shape (dtype, ...) specification is met!
- -# Bye-bye bugs -- say hello to enforced, clear documentation of PyTorch code. -# torchtyping may be used instead of littering code with comments like `# x has shape (batch, hidden_state)` or statements like `assert x.shape == y.shape`, just to keep track of what shape/dtype/etc everything is." -# link: https://github.com/patrick-kidger/torchtyping -# poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A7.png -# section: A7 -# thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A7-thumb.png -# title: "TorchTyping: rich type annotations of shape, dtype, etc
" -# - authors: -# - Yashl Kanungo -# - Sumit Negi -# categories: -# - ACCELERATORS, TOOLS, LIBRARY, DATA -# description: "Amazon Ads helps companies build their brand and connect with shoppers, through ads shown both within and beyond Amazon’s store, including websites, apps, and streaming TV content in more than 15 countries. Businesses or brands of all sizes including registered sellers, vendors, book vendors, Kindle Direct Publishing (KDP) authors, app developers, and agencies on Amazon marketplaces can upload their own ad creatives, which can include images, video, audio, and of course products sold on Amazon. For our text ad processing, we deploy PyTorch based BERT models globally on AWS Inferentia based Inf1 instances. By moving to Inferentia from GPUs, we were able to lower our cost by 69% with comparable performance." -# link: https://aws.amazon.com/blogs/aws/scaling-ad-verification-with-machine-learning-and-aws-inferentia/ -# poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A6.png -# section: A6 -# thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A6-thumb.png -# title: Scaling Ad Classification with Machine Learning on PyTorch and AWS Inferentia -# - authors: -# - Bharath Ramsundar -# categories: -# - ACCELERATORS, TOOLS, LIBRARY, DATA -# description: "DeepChem uses PyTorch to implement a number of scientific deep learning models for use in modeling proteins, small molecules, materials and physical simulations. DeepChem aims to become a powerful domain specific language for scientific applications that leverages PyTorch to provide a solid base for our models." -# link: -# poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A5.png -# section: A5 -# thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A5-thumb.png -# title: "DeppChem: A Toolbox for AI driven Science" -- authors: - - Clement Fuji Tsang - - Jean-Francois Lafleche - - Charles Loop - - Masha Shugrina - - Towaki Takikawa - - Jiehan Wang - categories: - - ACCELERATORS, TOOLS, LIBRARY, DATA - description: "NVIDIA Kaolin is a suite of tools for accelerating 3D Deep Learning research. The Kaolin library provides a PyTorch API for working with a variety of 3D representations and includes a growing collection of GPU-optimized operations such as modular differentiable rendering, fast conversions between representations, loss functions, data loading, 3D checkpoints and more. The library also contains a lightweight 3D visualizer Dash3D and can work with an Omniverse companion app for dataset/checkpoint visualization and synthetic data generation." - link: - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A3.png - section: A3 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A3-thumb.png - title: Kaolin Library -- authors: - - Jack Cao - - Milad Mohammadi - - Zak Stone - - Vaibhav Singh - - Calvin Pelletier - - Shauheen Zahirazami - categories: - - ACCELERATORS, TOOLS, LIBRARY, DATA - description: "PyTorch / XLA offers PyTorch users the ability to train their models on XLA devices including Cloud TPUs. This compiled path often makes it possible to utilize creative optimizations and achieve top performance on target XLA devices. With the introduction of Cloud TPU VMs, users have direct access to TPU host machines and therefore a great level of flexibility. In addition, TPU VMs make debugging easier and reduce data transfer overheads. 
Google has also recently announced the availability of Cloud TPU v4 Pods, which are exaflop-scale supercomputers for machine learning. Cloud TPU v4 Pods offer a whole new level of performance for large-scale PyTorch / XLA training of ML models." - link: - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A2.png - section: A2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A2-thumb.png - title: Accelerate PyTorch training with Cloud TPUs -- authors: - - Antonio Kim - - Behzad Abghari - - Chris Oliver - - Cynthia Liu - - Mark Browning - - Vishal Subbiah - - Kamran Jafari - - Emad Barsoum - - Jessica Liu - - Sean Lie - categories: - - ACCELERATORS, TOOLS, LIBRARY, DATA - description: "The Cerebras Wafer Scale Engine (WSE) is the largest processor ever built, dedicated to accelerating deep learning models for training and inference. A single chip in a single CS-2 system provides the compute power of a cluster of GPUs but acts as a single processor, making it also much simpler to use. We present the current PyTorch backend architecture for the Cerebras CS-2 and how we go all the way from PyTorch to laying out the model graph on the wafer. Additionally, we will discuss the advantages of training on Cerebras hardware and its unique capabilities." - link: https://cerebras.net - poster_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A1.png - section: A1 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/ptdd2021/posters/A1-thumb.png - title: Accelerating PyTorch on the largest chip ever built (WSE) \ No newline at end of file diff --git a/_data/ecosystem/pted/2021/posters.yaml b/_data/ecosystem/pted/2021/posters.yaml deleted file mode 100644 index 5f5f22524a3a..000000000000 --- a/_data/ecosystem/pted/2021/posters.yaml +++ /dev/null @@ -1,1853 +0,0 @@ -- authors: - - Josh Izaac - - Thomas Bromley - categories: - - Platform, Ops & Tools - description: - PennyLane allows you to train quantum circuits just like neural networks! - This poster showcases how PennyLane can be interfaced with PyTorch to enable training - of quantum and hybrid machine learning models. The outputs of a quantum circuit - are provided as a Torch tensor with a defined gradient. We highlight how this - functionality can be used to explore new paradigms in machine learning, including - the use of hybrid models for transfer learning. - link: http://pennylane.ai - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/K1.png - section: K1 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-K1.png - title: Bring quantum machine learning to PyTorch with PennyLane -- authors: - - Jeffrey Mew - categories: - - Compiler & Transform & Production - description: - "Visual Studio Code, a free cross-platform lightweight code editor,\ - \ has become the most popular among Python developers for both web and machine\ - \ learning projects.
We will be walking you through an end to end PyTorch project\ - \ to showcase what VS Code has to offer PyTorch developers to boost their\ - \ productivity.\n \nFirstly, get your PyTorch project quickly up and running with\ - \ VS Code's environment/dependency management and built-in Jupyter Notebook support.\ - \ Secondly, breeze through coding with help from our AI-powered IntelliSense.\ - \ When it's time to run your code, use the built-in Tensorboard integration to\ - \ monitor your training along with the integrated PyTorch profiler to analyze\ - \ and debug your code. Once you're ready for the cloud, VS Code has Azure service\ - \ integration to allow you to scale your model training\ - \ and deployment.\n \nCombining the power of the code editor with easy access to\ - \ the Azure services, VS Code can be the one-stop shop for any developer looking\ - \ to build machine learning models with PyTorch." - link: https://pytorch.org/blog/introducing-pytorch-profiler-the-new-and-improved-performance-tool/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/A4.png - section: A4 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-A4.png - title: PyTorch development in VS Code -- authors: - - Yanan Cao - - Harry Kim - - Jason Ansel - categories: - - Compiler & Transform & Production - description: - TorchScript is the bridge from PyTorch's flexible eager mode to a - more deterministic and performant graph mode suitable for production deployment. - As part of the PyTorch 1.9 release, TorchScript will launch a few features that we'd - like to share with you early, including a) a new formal language specification - that defines the exact subset of Python/PyTorch features supported in TorchScript; - b) Profile-Directed Typing that reduces the burden of converting a loosely-typed - eager model into a strictly-typed TorchScript model; c) a TorchScript profiler - that can shed light on performance characteristics of a TorchScript model. We are - constantly making improvements to make TorchScript easier to use and more performant. - link: http://fb.me/torchscript - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/A5.png - section: A5 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-A5.png - title: Upcoming features in TorchScript -- authors: - - Alessandro Pappalardo - categories: - - Compiler & Transform & Production - description: - Brevitas is an open-source PyTorch library for quantization-aware training. - Thanks to its flexible design at multiple levels of abstraction, Brevitas generalizes - the typical uniform affine quantization paradigm adopted in the deep learning - community under a common set of unified APIs. Brevitas provides a platform to - both ML practitioners and researchers to either apply built-in state-of-the-art - techniques in training for reduced-precision inference, or to implement novel - quantization-aware training algorithms. Users can target supported inference toolchains, - such as onnxruntime, TVM, Vitis AI, FINN or PyTorch itself, or experiment with - hypothetical target hardware platforms. In particular, when combined with the - flexibility of Xilinx FPGAs through the FINN toolchain, Brevitas supports the - co-design of novel hardware building blocks in a machine-learning driven fashion.
- Within Xilinx, Brevitas has been adopted by various research projects concerning - quantized neural networks, as well as in large scale deployments targeting custom - programmable logic accelerators. - link: https://github.com/Xilinx/brevitas/ - section: B4 - title: Quantization-Aware Training with Brevitas -- authors: - - Jerry Zhang - - Vasiliy Kuznetsov - - Raghuraman Krishnamoorthi - categories: - - Compiler & Transform & Production - description: - Quantization is a common model optimization technique to speed up the runtime - of a model by up to 4x, with a possible slight loss of accuracy. Currently, PyTorch - supports Eager Mode Quantization. FX Graph Mode Quantization improves upon Eager - Mode Quantization by adding support for functionals and automating the quantization - process. To use FX Graph Mode Quantization, one might need to refactor the model - to make it compatible with FX Graph Mode Quantization (symbolically traceable - with torch.fx). - link: https://pytorch.org/docs/master/quantization.html#prototype-fx-graph-mode-quantization - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/B5.png - section: B5 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-B5.png - title: "PyTorch Quantization: FX Graph Mode Quantization" -- authors: - - Fabio Nonato - categories: - - Compiler & Transform & Production - description: - " Deep learning models can have game-changing impact on machine learning\ - \ applications. However, deploying and managing deep learning models in production\ - \ is complex and requires considerable engineering effort - from building custom\ - \ inferencing APIs and scaling prediction services, to securing applications,\ - \ while still leveraging the latest ML frameworks and hardware technology. Amazon\ - \ EC2 Inf1 instances powered by AWS Inferentia deliver the highest performance\ - \ and lowest cost machine learning inference in the cloud. Developers can deploy\ - \ their deep-learning models to Inf1 instances using the AWS Neuron SDK that is\ - \ natively integrated with PyTorch.\n \nAttend this poster session to learn how\ - \ you can optimize and accelerate the deployment of your deep learning models\ - \ in production using Inf1 instances and TorchServe containers. You will learn\ - \ how to deploy TorchScript models on Inf1 and optimize your models with minimal\ - \ code changes with features such as NeuronCore Groups and NeuronCore Pipeline,\ - \ to meet your throughput and latency requirements. You can directly integrate\ - \ these model level optimizations into the inference endpoint using TorchServe.\n\ - \ \nWe will also deep dive into how we optimized performance of a natural language\ - \ processing endpoint and showcase the workflow for deploying the optimized model\ - \ using TorchServe containers on Amazon ECS."
- link: https://bit.ly/3mQVowk - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/C4.png - section: C4 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-C4.png - title: - Accelerate deployment of deep learning models in production with Amazon EC2 - Inf1 and TorchServe containers -- authors: - - James Reed - - Zachary DeVito - - Ansley Ussery - - Horace He - - Michael Suo - categories: - - Compiler & Transform & Production - description: - "FX is a toolkit for writing Python-to-Python transforms over PyTorch\ - \ code.\nFX consists of three parts:\n> Symbolic Tracing \u2013 a method to extract\ - \ a representation of the program by running it with \"proxy\" values.\n> Graph-based\ - \ Transformations \u2013 FX provides an easy-to-use Python-based Graph API for\ - \ manipulating the code.\n> Python code generation \u2013 FX generates valid Python\ - \ code from graphs and turns that code into executable Python `nn.Module` instances." - link: https://pytorch.org/docs/stable/fx.html - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/C5.png - section: C5 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-C5.png - title: Torch.fx -- authors: - - Abhijit Khobare - - Murali Akula - - Tijmen Blankevoort - - Harshita Mangal - - Frank Mayer - - Sangeetha Marshathalli Siddegowda - - Chirag Patel - - Vinay Garg - - Markus Nagel - categories: - - Compiler & Transform & Production - description: - "AI is revolutionizing industries, products, and core capabilities - by delivering dramatically enhanced experiences. However, the deep neural networks - of today use too much memory, compute, and energy. To make AI truly ubiquitous, - it needs to run on the end device within a tight power and thermal budget. Quantization - and compression help address these issues. In this tutorial, we'll discuss: - - The existing quantization and compression challenges - - Our research in novel quantization and compression techniques to overcome these - challenges - - How developers and researchers can implement these techniques through the AI Model - Efficiency Toolkit" - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/D4.png - section: D4 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-D4.png - title: AI Model Efficiency Toolkit (AIMET) -- authors: - - Natasha Seelam - - Patricio Cerda-Mardini - - Cosmo Jenytin - - Jorge Torres - categories: - - Database & AI Accelerators - description: - 'Pytorch enables building models with complex inputs and outputs, including - time-series data, text and audiovisual data. However, such models require expertise - and time to build, often spent on tedious tasks like cleaning the data or transforming - it into a format that is expected by the models. - - - Thus, pre-trained models are often used as-is when a researcher wants to experiment - only with a specific facet of a problem. See, as examples, FastAI''s work into - optimizers, schedulers, and gradual training through pre-trained residual models, - or NLP projects with Hugging Face models as their backbone. - - - We think that, for many of these problems, we can automatically generate a "good - enough" model and data-processing pipeline from just the raw data and the endpoint. - To address this situation, we are developing MindsDB, an open-source, PyTorch-based - ML platform that works inside databases via SQL commands. 
It is built with a modular - approach, and in this talk we are going to focus on Lightwood, the stand-alone - core component that performs machine learning automation on top of the PyTorch - framework. - - - Lightwood automates model building into 5 stages: (1) classifying each feature - into a "data type", (2) running statistical analyses on each column of a dataset, - (3) fitting multiple models to normalize, tokenize, and generate embeddings for - each feature, (4) deploying the embeddings to fit a final estimator, and (5) running - an analysis on the final ensemble to evaluate it and generate a confidence model. - It can generate quick "baseline" models to benchmark performance for any custom - encoder representation of a data type and can also serve as scaffolding for investigating - new hypotheses (architectures, optimizers, loss-functions, hyperparameters, etc). - - - We aim to present our benchmarks covering wide swaths of problem types and illustrate - how Lightwood can be useful for researchers and engineers through a hands-on demo.' - link: https://mindsdb.com - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/H8.png - section: H8 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-H8.png - title: - "Pytorch via SQL commands: A flexible, modular AutoML framework that democratizes - ML for database users" -- authors: - - "Sam Partee " - - Alessandro Rigazzi - - Mathew Ellis - - Benjamin Rob - categories: - - Database & AI Accelerators - description: - SmartSim is an open source library dedicated to enabling online analysis - and Machine Learning (ML) for traditional High Performance Computing (HPC) simulations. - Clients are provided in common HPC simulation languages, C/C++/Fortran, that enable - simulations to perform inference requests in parallel on large HPC systems. SmartSim - utilizes the Redis ecosystem to host and serve PyTorch models alongside simulations. - We present a use case of SmartSim where a global ocean simulation, used in climate - modeling, is augmented with a PyTorch model to resolve quantities of eddy kinetic - energy within the simulation. - link: https://github.com/CrayLabs/SmartSim - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/J8.png - section: J8 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-J8.png - title: PyTorch on Supercomputers Simulations and AI at Scale with SmartSim -- authors: - - Patricio Cerda-Mardini - - Natasha Seelam - categories: - - Database & AI Accelerators - description: - 'Many domains leverage the extraordinary predictive performance of - machine learning algorithms. However, there is an increasing need for transparency - of these models in order to justify deploying them in applied settings. Developing - trustworthy models is a great challenge, as they are usually optimized for accuracy, - relegating the fit between the true and predicted distributions to the background - [1]. This concept of obtaining predicted probability estimates that match the - true likelihood is also known as calibration. - - - Contemporary ML models generally exhibit poor calibration. There are several methods - that aim at producing calibrated ML models [2, 3]. Inductive conformal prediction - (ICP) is a simple yet powerful framework to achieve this, offering strong guarantees - about the error rates of any machine learning model [4]. 
ICP provides confidence - scores and turns any point prediction into a prediction region through nonconformity - measures, which indicate the degree of inherent strangeness a data point presents - when compared to a calibration data split. - - - In this work, we discuss the integration of ICP with MindsDB --an open source - AutoML framework-- successfully replacing its existing quantile loss approach - for confidence estimation capabilities. - - Our contribution is threefold. First, we present a study on the effect of a "self-aware" - neural network normalizer in the width of predicted region sizes (also known as - efficiency) when compared to an unnormalized baseline. Our benchmarks consider - results for over 30 datasets of varied domains with both categorical and numerical - targets. Second, we propose an algorithm to dynamically determine the confidence - level based on a target size for the predicted region, effectively prioritizing - efficiency over a minimum error rate. Finally, we showcase the results of a nonconformity - measure specifically tailored for small datasets. - - - References: - - [1] Guo, C., Pleiss, G., Sun, Y., & Weinberger, K.Q. (2017). On Calibration of - Modern Neural Networks. ArXiv, abs/1706.04599. - - [2] Naeini, M., Cooper, G., & Hauskrecht, M. (2015). Obtaining Well Calibrated - Probabilities Using Bayesian Binning. Proceedings of the AAAI Conference on Artificial - Intelligence. AAAI Conference on Artificial Intelligence, 2015, 2901-2907 . - - [3] Maddox, W., Garipov, T., Izmailov, P., Vetrov, D., & Wilson, A. (2019). A - Simple Baseline for Bayesian Uncertainty in Deep Learning. NeurIPS. - - [4] Papadopoulos, H., Vovk, V., & Gammerman, A. (2007). Conformal Prediction with - Neural Networks. 19th IEEE International Conference on Tools with Artificial Intelligence - (ICTAI 2007), 2, 388-395.' - link: https://mindsdb.com - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/I8.png - section: I8 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-I8.png - title: Model agnostic confidence estimation with conformal predictors for AutoML -- authors: - - Derek Bouius - categories: - - Database & AI Accelerators - description: - AMD Instinct GPUs are enabled with the upstream PyTorch repository - via the ROCm open software platform. Now users can also easily download the installable - Python package, built from the upstream PyTorch repository and hosted on pytorch.org. - Notably, it includes support for distributed training across multiple GPUs and - supports accelerated mixed precision training. AMD also provides hardware support - for the PyTorch community build to help develop and maintain new features. This - poster will highlight some of the work that has gone into enabling PyTorch support. - link: https://www.amd.com/rocm - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/K8.png - section: K8 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-K8.png - title: - "Enabling PyTorch on AMD Instinct\u2122 GPUs with the AMD ROCm\u2122 Open\ - \ Software Platform" -- authors: - - DeepSpeed Team Microsoft Corporation - categories: - - Distributed Training - description: - "In the poster (and a talk during the breakout session), we will present - three aspects of DeepSpeed (https://github.com/microsoft/DeepSpeed), a deep learning - optimization library based on PyTorch framework: 1) How we overcome the GPU memory - barrier by ZeRO-powered data parallelism. 
2) How we overcome the network bandwidth - barrier by 1-bit Adam and 1-bit Lamb compressed optimization algorithms. 3) How - we overcome the usability barrier by integration with Azure ML, HuggingFace, and - PyTorch Lightning." - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/E1.png - section: E1 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-E1.png - title: "DeepSpeed: Shattering barriers of deep learning speed & scale" -- authors: - - Stephanie Kirmer - - Hugo Shi - categories: - - Distributed Training - description: - We have developed a library that helps simplify the task of multi-machine - parallel training for PyTorch models, bringing together the power of PyTorch DDP - with Dask for parallelism on GPUs. Our poster describes the library and its core - function, and demonstrates how the multi-machine training process works in practice. - link: https://github.com/saturncloud/dask-pytorch-ddp - section: E2 - title: - "Dask PyTorch DDP: A new library bringing Dask parallelization to PyTorch - training" -- authors: - - Vignesh Gopakumar - categories: - - Distributed Training - description: - Solving PDEs using Neural Networks is often arduous, as - it requires training towards a well-defined solution, i.e. a global minimum for a - network architecture - objective function combination. For a family of complex - PDEs, Physics Informed neural networks won't offer much in comparison to traditional - numerical methods, as their global minima become more and more intractable. We - propose a modified approach that hinges on continual and parametrised learning - that can create more general PINNs that can solve for a variety of PDE scenarios - rather than solving for a well-defined case. We believe that this brings Neural - Network based PDE solvers into meaningful comparison with numerical solvers. - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/E3.png - section: E3 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-E3.png - title: Optimising Physics Informed Neural Networks. -- authors: - - Mandeep Baines - - Shruti Bhosale - - Vittorio Caggiano - - Benjamin Lefaudeux - - Vitaliy Liptchinsky - - Naman Goyal - - Siddhardth Goyal - - Myle Ott - - Sam Sheifer - - Anjali Sridhar - - Min Xu - categories: - - Distributed Training - description: - 'FairScale is a library that extends basic PyTorch capabilities while - adding new SOTA techniques for high performance and large scale training on one - or multiple machines. FairScale makes available the latest distributed training - techniques in the form of composable modules and easy to use APIs. - - - Machine Learning (ML) training at scale traditionally means data parallelism to - reduce training time by using multiple devices to train on larger batch sizes. - Nevertheless, with the recent increase in ML model sizes, data parallelism is - no longer enough to satisfy all "scaling" needs. FairScale provides several options - to overcome some of the limitations to scale. - - - For scaling training that is bottlenecked by memory (optimizer state, intermediate - activations, parameters), FairScale provides APIs that have implemented optimizer, - gradient and parameter sharding. This will allow users to train large models using - devices in a more memory efficient manner.
- - - To overcome the memory required for large models FairScale provides various flavors - of pipeline and model parallelism, an MOE (Mixture Of Experts) layer, and Offload - models. Those methods allow performing computation on only shards of the models - across multiple devices with micro-batches of data to maximize device efficiency. - - - FairScale also provides modules to help users scale batch size effectively without - changing their existing learning rate hyperparameter - AdaScale - and save memory - with activation checkpointing of intermediate layers. - - - FairScale has also been integrated into PyTorch Lightning, HuggingFace, FairSeq, - VISSL, and MMF to enable users of those frameworks to take advantage of its features.' - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/F1.png - section: F1 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-F1.png - title: - FairScale-A general purpose modular PyTorch library for high performance - and large scale training -- authors: - - Aurick Qiao - - Sang Keun Choe - - Suhas Jayaram Subramanya - - Willie Neiswanger - - Qirong Ho - - Hao Zhang - - Gregory R. Ganger - - Eric P. Xing - categories: - - Distributed Training - description: - "AdaptDL is an open source framework and scheduling algorithm that - directly optimizes cluster-wide training performance and resource utilization. - By elastically re-scaling jobs, co-adapting batch sizes and learning rates, and - avoiding network interference, AdaptDL improves shared-cluster training compared - with alternative schedulers. AdaptDL can automatically determine the optimal number - of resources given a job's need. It will efficiently add or remove resources - dynamically to ensure the highest-level performance. The AdaptDL scheduler will - automatically figure out the most efficient number of GPUs to allocate to your - job, based on its scalability. When the cluster load is low, your job can dynamically - expand to take advantage of more GPUs. AdaptDL offers an easy-to-use API to make - existing PyTorch training code elastic with adaptive batch sizes and learning - rates. - - Showcase: Distributed training and Data Loading" - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/F2.png - section: F2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-F2.png - title: - "AdaptDL: An Open-Source Resource-Adaptive Deep Learning Training/Scheduling - Framework" -- authors: - - Natalie Kershaw - categories: - - Distributed Training - description: - "As deep learning models, especially transformer models, get bigger - and bigger, reducing training time becomes both a financial and environmental - imperative. ONNX Runtime can accelerate large-scale distributed training of PyTorch - transformer models with a one-line code change (in addition to import statements - ;-)). Adding in the DeepSpeed library improves training speed even more. - - - With the new ORTModule API, you wrap an existing torch.nn.Module, and have us - automatically: export the model as an ONNX computation graph; compile and optimize - it with ONNX Runtime; and integrate it into your existing training script. - - - In this poster, we demonstrate how to fine-tune a popular HuggingFace model and - show the performance improvement on a multi-GPU cluster in the Azure Machine - Learning cloud service."
- link: https://aka.ms/pytorchort - section: G1 - title: - "Accelerate PyTorch large model training with ONNX Runtime: just add one - line of code!" -- authors: - - Jack Cao - - Daniel Sohn - - Zak Stone - - Shauheen Zahirazami - categories: - - Distributed Training - description: - PyTorch / XLA enables users to train PyTorch models on XLA devices - including Cloud TPUs. Cloud TPU VMs now provide direct access to TPU host machines - and hence offer much greater flexibility in addition to making debugging easier - and reducing data transfer overheads. PyTorch / XLA has now full support for this - new architecture. A new profiling tool has also been developed to enable better - profiling of PyTorch / XLA. These improvements not only make it much easier to - develop models but also reduce the cost of large-scale PyTorch / XLA training - runs on Cloud TPUs. - link: http://goo.gle/pt-xla-tpuvm-signup - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/G2.png - section: G2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-G2.png - title: PyTorch/XLA with new Cloud TPU VMs and Profiler -- authors: - - Ari Bornstein - categories: - - Frontend & Experiment Manager - description: - PyTorch Lightning reduces the engineering boilerplate and resources - required to implement state-of-the-art AI. Organizing PyTorch code with Lightning - enables seamless training on multiple-GPUs, TPUs, CPUs, and the use of difficult - to implement best practices such as model sharding, 16-bit precision, and more, - without any code changes. In this poster, we will use practical Lightning examples - to demonstrate how to train Deep Learning models with less boilerplate. - link: https://www.pytorchlightning.ai/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/E4.png - section: E4 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-E4.png - title: "PyTorch Lightning: Deep Learning without the Boilerplate" -- authors: - - Jiong Gong - - Nikita Shustrov - - Eikan Wang - - Jianhui Li - - Vitaly Fedyunin - categories: - - Frontend & Experiment Manager - description: - "Intel and Facebook collaborated to enable BF16, a first-class data\ - \ type in PyTorch, and a data type that are accelerated natively with the 3rd\ - \ Gen Intel\xAE Xeon\xAE scalable processors. This poster introduces the latest\ - \ SW advancements added in Intel Extension for PyTorch (IPEX) on top of PyTorch\ - \ and the oneAPI DNN library for ease-of-use and high-performance BF16 DL compute\ - \ on CPU. With these SW advancements, we demonstrated ease-of-use IPEX user-facing\ - \ API, and we also showcased 1.55X-2.42X speed-up with IPEX BF16 training over\ - \ FP32 with the stock PyTorch and 1.40X-4.26X speed-up with IPEX BF16 inference\ - \ over FP32 with the stock PyTorch." - link: https://github.com/intel/intel-extension-for-pytorch - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/E5.png - section: E5 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-E5.png - title: Accelerate PyTorch with IPEX and oneDNN using Intel BF16 Technology -- authors: - - Robin Lobel - categories: - - Frontend & Experiment Manager - description: - TorchStudio is a standalone software based on PyTorch and LibTorch. - It aims to simplify the creation, training and iterations of PyTorch models. It - runs locally on Windows, Ubuntu and macOS. 
It can load, analyze and explore PyTorch - datasets from the TorchVision or TorchAudio categories, or custom datasets with - any number of inputs and outputs. PyTorch models can then be loaded or written - from scratch, analyzed, and trained using local hardware. Trainings can be run - simultaneously and compared to identify the best performing models and export - them as trained TorchScript or ONNX models. - link: https://torchstudio.ai/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/F4.png - section: F4 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-F4.png - title: TorchStudio, a machine learning studio software based on PyTorch -- authors: - - Jieru Hu - - "Omry Yadan " - categories: - - Frontend & Experiment Manager - description: - "Hydra is an open source framework for configuring and launching research - Python applications. Key features: - Compose and override your config dynamically - to get the perfect config for each run - Run on remote clusters like SLURM and - AWS without code changes - Perform basic grid search and hyperparameter optimization - without code changes - Command line tab completion for your dynamic config. And - more." - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/F5.png - section: F5 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-F5.png - title: Hydra Framework -- authors: - - Victor Fomin - - Sylvain Desroziers - - Taras Savchyn - categories: - - Frontend & Experiment Manager - description: - This poster intends to give a brief but illustrative overview of what - PyTorch-Ignite can offer to Deep Learning enthusiasts, professionals and researchers. - Following the same philosophy as PyTorch, PyTorch-Ignite aims to keep it simple, - flexible and extensible but performant and scalable. Throughout this poster, we - will introduce the basic concepts of PyTorch-Ignite, its API and the features it offers. - We also assume that the reader is familiar with PyTorch. - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/G4.png - section: G4 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-G4.png - title: "PyTorch-Ignite: training common things easy and the hard things possible" -- authors: - - Sanzhar Askaruly - - Nurbolat Aimakov - - Alisher Iskakov - - Hyewon Cho - categories: - - Medical & Healthcare - description: - Deep learning has transformed many aspects of industrial pipelines - recently. Scientists involved in biomedical imaging research are also benefiting - from the power of AI to tackle complex challenges. Although the academic community - has widely accepted image processing tools, such as scikit-image and ImageJ, there - is still a need for a tool which integrates deep learning into biomedical image - analysis. We propose a minimal, but convenient Python package based on PyTorch - with common deep learning models, extended by flexible trainers and medical datasets.
- link: https://github.com/tuttelikz/farabio - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/H4.png - section: H4 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-H4.png - title: Farabio - Deep Learning Toolkit for Biomedical Imaging -- authors: - - Michael Zephyr - - Prerna Dogra Richard Brown - - Wenqi Li - - Eric Kerfoot - categories: - - Medical & Healthcare - description: - "Healthcare image analysis for both radiology and pathology is increasingly\ - \ being addressed with deep-learning-based solutions. These applications have\ - \ specific requirements to support various imaging modalities like MR, CT, ultrasound,\ - \ digital pathology, etc. It is a substantial effort for researchers in the field\ - \ to develop custom functionalities to handle these requirements. Consequently,\ - \ there has been duplication of effort, and as a result, researchers have incompatible\ - \ tools, which makes it hard to collaborate.\n \nMONAI stands for Medical Open\ - \ Network for AI. Its mission is to accelerate the development of healthcare imaging\ - \ solutions by providing domain-specialized building blocks and a common foundation\ - \ for the community to converge in a native PyTorch paradigm." - link: https://monai.io/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/H5.png - section: H5 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-H5.png - title: "MONAI: A Domain Specialized Library for Healthcare Imaging" -- authors: - - Shai Brown - - Daniel Neimark - - Maya Zohar - - Omri Bar - - Dotan Asselmann - categories: - - Medical & Healthcare - description: - "Theator is re-imagining surgery with a Surgical Intelligence platform\ - \ that leverages highly advanced AI, specifically machine learning and computer\ - \ vision technology, to analyze every step, event, milestone, and critical junction\ - \ of surgical procedures.\n\nOur platform analyzes lengthy surgical procedure\ - \ videos and extracts meaningful information, providing surgeons with highlight\ - \ reels of key moments in an operation, enhanced by annotations.\n\nAs the team\ - \ expanded, we realized that we were spending too much time manually running model\ - \ training and focusing on DevOps tasks and not enough time dedicated to core\ - \ research.\n\nTo face this, we build an automation framework composed of multiple\ - \ training pipelines using PyTorch and ClearML. Our framework automates and manages\ - \ our entire process, from model development to deployment to continuous training\ - \ for model improvement.\n\nNew data is now immediately processed and fed directly\ - \ into training pipelines \u2013 speeding up workflow, minimizing human error,\ - \ and freeing up our research team for more important tasks. Thus, enabling us\ - \ to scale our ML operation and deliver better models for our end users." 
- link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/I4.png - section: I4 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-I4.png - title: - How theator Built a Continuous Training Framework to Scale Up Its Surgical - Intelligence Platform -- authors: - - Cebere Bogdan - - Cebere Tudor - - Manolache Andrei - - Horia Paul-Ion - categories: - - Medical & Healthcare - description: - We present Q&Aid, a conversation agent that relies on a series of machine - learning models to filter, label, and answer medical questions based on a provided - image and text inputs. Q&Aid is simplifying the hospital logic backend by standardizing - it to a Health Intel Provider (HIP). A HIP is a collection of models trained on - local data that receives text and visual input, afterward filtering, labeling, - and feeding the data to the right models and generating at the end output for - the aggregator. Any hospital is identified as a HIP holding custom models and - labeling based on its knowledge. The hospitals are training and fine-tuning their - models, such as a Visual Question Answering (VQA) model, on private data (e.g. - brain anomaly segmentation). We aggregate all of the tasks that the hospitals - can provide into a single chat app, offering the results to the user. When the - chat ends, the transcript is forwarded to each hospital, a doctor being in charge - of the final decision. - link: https://qrgo.page.link/d1fQk - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/I5.png - section: I5 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-I5.png - title: "Q&Aid: A Conversation Agent Powered by PyTorch" -- authors: - - Jaden Hong - - Kevin Tran - - Tyler Lee - - Paul Lee - - Freddie Cha - - Louis Jung - - Dr. Jung Kyung Hong - - Dr. In-Young Yoon - - David Lee - categories: - - Medical & Healthcare - description: - "Sleep disorders and insomnia are now regarded as a worldwide problem.\ - \ Roughly 62% of adults worldwide feel that they don't sleep well. However, sleep\ - \ is difficult to track so it's not easy to get suitable treatment to improve\ - \ your sleep quality. Currently, the PSG (Polysomnography) is the only way to\ - \ evaluate the sleep quality accurately but it's expensive and often inaccurate\ - \ due to the first night effect. \n\nWe propose a multi-signal sleep stage classifier\ - \ for contactless sleep tracking: Sleepbot. By automating the manual PSG reading\ - \ and providing explainable analysis, Sleepbot opens a new possibility to apply\ - \ sleep staging AI in both home and hospital. With sound recorded by a smartphone\ - \ app and RF-sensed signal measured by Asleep's non-contact sleep tracker, Sleepbot\ - \ provides a clinical level of sleep stage classification. \n\nSleepbot achieved\ - \ 85.5 % accuracy in 5-class (Wake, N1, N2, N3, Rem) using PSG signals measured\ - \ from 3,700 subjects and 77 % accuracy in 3-class (Wake, Sleep, REM) classification\ - \ using only sound data measured from 1,2000 subjects." 
- link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/J4.png - section: J4 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-J4.png - title: "Sleepbot: Multi-signal Sleep Stage Classifier AI for hospital and home" -- authors: - - Akshay Agrawal - - Alnur Ali - - Stephen Boyd - categories: - - Medical & Healthcare - description: - "We present a unifying framework for the vector embedding problem: - given a set of items and some known relationships between them, we seek a representation - of the items by vectors, possibly subject to some constraints (e.g., requiring - the vectors to have zero mean and identity covariance). We want the vectors associated - with similar items to be near each other, and vectors associated with dissimilar - items to not be near, measured in Euclidean distance. We formalize this by introducing - distortion functions, defined for some pairs of the items. Our goal is to choose - an embedding that minimizes the total distortion, subject to the constraints. - We call this the minimum-distortion embedding (MDE) problem. The MDE framework - generalizes many well-known embedding methods, such as PCA, the Laplacian eigenmap, - multidimensional scaling, UMAP, and others, and also includes new types of embeddings. - - - Our accompanying software library, PyMDE, makes it easy for users to specify and - approximately solve MDE problems, enabling experimentation with well-known and - custom embeddings alike. By making use of automatic differentiation and hardware - acceleration via PyTorch, we are able to scale to very large embedding problems. - We will showcase examples of embedding real datasets, including an academic co-authorship - network, single-cell mRNA transcriptomes, US census data, and population genetics." - link: "" - section: J5 - title: "PyMDE: Minimum-Distortion Embedding" -- authors: - - "Fernando P\xE9rez-Garc\xEDa" - - Rachel Sparks - - "S\xE9bastien Ourselin" - categories: - - Medical & Healthcare - description: - "Processing of medical images such as MRI or CT presents unique challenges - compared to RGB images typically used in computer vision. These include a lack - of labels for large datasets, high computational costs, and metadata to describe - the physical properties of voxels. Data augmentation is used to artificially increase - the size of the training datasets. Training with image patches decreases the need - for computational power. Spatial metadata needs to be carefully taken into account - in order to ensure a correct alignment of volumes. - - - We present TorchIO, an open-source Python library to enable efficient loading, - preprocessing, augmentation and patch-based sampling of medical images for deep - learning. TorchIO follows the style of PyTorch and integrates standard medical - image processing libraries to efficiently process images during training of neural - networks. TorchIO transforms can be composed, reproduced, traced and extended. - We provide multiple generic preprocessing and augmentation operations as well - as simulation of MRI-specific artifacts. - - - TorchIO was developed to help researchers standardize medical image processing - pipelines and allow them to focus on the deep learning experiments. It encourages - open science, as it supports reproducibility and is version controlled so that - the software can be cited precisely. Due to its modularity, the library is compatible - with other frameworks for deep learning with medical images." 
- link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/K4.png - section: K4 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-K4.png - title: - "TorchIO: Pre-Processing & Augmentation of Medical Images for Deep Learning - Applications" -- authors: - - Laila Rasmy - - Ziqian Xie - - Degui Zhi - categories: - - Medical & Healthcare - description: - With the extensive use of electronic records and the availability of - historical patient information, predictive models that can help identify patients - at risk based on their history at an early stage can be a valuable adjunct to - clinician judgment. Deep learning models can better predict patients' outcomes - by consuming their medical history regardless of the length and the complexity - of such data. We used our Pytorch_EHR framework to train a model that can predict - COVID-19 patient's health outcomes on admission. We used the Cerner Real-world - COVID-19 (Q2) cohort which included information for 117,496 COVID patients from - 62 health systems. We used a cohort of 55,068 patients and defined our outcomes - including mortality, intubation, and hospitalization longer than 3 days as binary - outcomes. We feed the model with all diagnoses, medication, laboratory results, - and other clinical events information available before or on their first COVID-19 - encounter admission date. We kept the data preprocessing at a minimum for convenience - and practicality relying on the embedding layer that learns features representations - from the large training set. Our model showed improved performance compared to - other baseline machine learning models like logistic regression (LR). For in-hospital - mortality, our model showed AUROC of 89.5%, 90.6%, and 84.3% for in-hospital mortality, - intubation, and hospitalization for more than 3 days, respectively versus LR which - showed 82.8%, 83.2%, and 76.8% - link: https://github.com/ZhiGroup/pytorch_ehr - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/K5.png - section: K5 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-K5.png - title: Deep Learning Based Model to Predict Covid19 Patients' Outcomes on Admission -- authors: - - Binghui Ouyang - - "Alexander O\u2019Connor " - categories: - - NLP & Multimodal, RL & Time Series - description: - "While Transformers have brought unprecedented improvements in the\ - \ accuracy and ease of developing NLP applications, their deployment remains challenging\ - \ due to the large size of the models and their computational complexity. \n Indeed,\ - \ until recently is has been a widespread misconception that hosting high-performance\ - \ transformer-based models was prohibitively expensive, and technically challenging.\ - \ Fortunately, recent advances in both the PyTorch ecosystem and in custom hardware\ - \ for inference have created a world where models can be deployed in a cost-effective,\ - \ scalable way, without the need for complex engineering.\n\nIn this presentation,\ - \ we will discuss the use of PyTorch and AWS Inferentia to deploy production-scale\ - \ models in chatbot intent classification - a particularly relevant and demanding\ - \ scenario. 
\n\nAutodesk deploys a number of transformer based models to solve\ - \ customer support issues across our channels, and our ability to provide a flexible,\ - \ high-quality machine learning solution is supported by leveraging cutting-edge\ - \ technology such as transformer based classification. Our chatbot, AVA, responds\ - \ to tens of thousands of customer interactions monthly, and we are evolving our\ - \ architecture to be supported by customer inference.\n\nWe will discuss our experience\ - \ of piloting transformer-based intent models, and present a workflow for going\ - \ from data to deployment for similar projects." - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/A1.png - section: A1 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-A1.png - title: " Rolling out Transformers with TorchScript and Inferentia" -- authors: - - Kashif Rasul - categories: - - NLP & Multimodal, RL & Time Series - description: - PyTorchTS is a PyTorch based Probabilistic Time Series forecasting - framework that comes with state of the art univariate and multivariate models. - link: https://github.com/zalandoresearch/pytorch-ts - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/A2.png - section: A2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-A2.png - title: "PyTorchTS: PyTorch Probabilistic Time Series Forecasting Framework" -- authors: - - Sasha Sheng - - Amanpreet Singh - categories: - - NLP & Multimodal, RL & Time Series - description: - MMF is designed from ground up to let you focus on what matters -- - your model -- by providing boilerplate code for distributed training, common datasets - and state-of-the-art pretrained baselines out-of-the-box. MMF is built on top - of PyTorch that brings all of its power in your hands. MMF is not strongly opinionated. - So you can use all of your PyTorch knowledge here. MMF is created to be easily - extensible and composable. Through our modular design, you can use specific components - from MMF that you care about. Our configuration system allows MMF to easily adapt - to your needs. - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/A3.png - section: A3 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-A3.png - title: "MMF: A modular framework for multimodal research" -- authors: - - Dirk Groeneveld - - Akshita Bhagia - - Pete Walsh - - Michael Schmitz - categories: - - NLP & Multimodal, RL & Time Series - description: - An Apache 2.0 NLP research library, built on PyTorch, for developing - state-of-the-art deep learning models on a wide variety of linguistic tasks. - link: https://github.com/allenai/allennlp - section: B1 - title: "AllenNLP: An NLP research library for developing state-of-the-art models" -- authors: - - John Trenkle - - Jaya Kawale & Tubi ML team - categories: - - NLP & Multimodal, RL & Time Series - description: - "Tubi is one of the leading platforms providing free high-quality streaming\ - \ movies and TV shows to a worldwide audience. We embrace a data-driven approach\ - \ and leverage advanced machine learning techniques using PyTorch to enhance our\ - \ platform and business in any way we can. The Three Pillars of AVOD are the\ - \ guiding principle for our work. 
The Pillars are \nContent: all the titles we\ - \ maintain in our library\nAudience: everyone who watches titles on Tubi\nAdvertising:\ - \ ads shown to viewers on behalf of brands\n\nIn this poster, we'll focus on the\ - \ Content aspect with more details for the various use cases especially Content\ - \ Understanding. Content is an important pillar of Tubi since to be successful,\ - \ we need to look at existing titles and beyond what we already have and attempt\ - \ to understand all of the titles out in the wild and how they could benefit our\ - \ platform in some fashion. Content Understanding revolves around digesting a\ - \ rich collection of 1st- and 3rd-party data in structured (metadata) and unstructured\ - \ (text) forms and developing representations that capture the essence of those\ - \ Titles. With the analogy of linear algebra, we can say we are attempting to\ - \ project Title vectors from the universe to our tubiverse with as much fidelity\ - \ as possible in order to ascertain potential value for each target use case.\ - \ We will describe several techniques to understand content better using Pytorch." - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/B2.png - section: B2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-B2.png - title: "Project Spock at Tubi: Understanding Content using Deep Learning for NLP" -- authors: - - Benoit Steiner - - Chris Cummins - - Horace He - - Hugh Leather - categories: - - NLP & Multimodal, RL & Time Series - description: - "As the usage of machine learning techniques is becoming ubiquitous,\ - \ the efficient execution of neural networks is crucial to many applications.\ - \ Frameworks, such as Halide and TVM, separate the algorithmic representation\ - \ of\nthe deep learning model from the schedule that determines its implementation.\ - \ Finding good schedules, however, remains extremely challenging. Auto-tuning\ - \ methods, which search the space of valid schedules and execute each candidate\ - \ on the hardware, identify some of the best performing schedules, but the search\ - \ can take hours, hampering the productivity of deep learning practitioners. What\ - \ is needed is a method that achieves a similar performance without extensive\ - \ search, delivering the needed efficiency quickly.\n\nUsing PyTorch, we model\ - \ the scheduling process as a sequence of optimization choices, and implement\ - \ a new technique to accurately predict the expected performance of a partial\ - \ schedule using a LSTM over carefully engineered features that describe each\ - \ DNN operator and their current scheduling choices. Leveraging these predictions\ - \ we are able to make these optimization decisions greedily and, without any executions\ - \ on the target hardware, rapidly identify an efficient schedule.\nThis techniques\ - \ enables to find schedules that improve the execution performance of deep neural\ - \ networks by 2.6\xD7 over Halide and 1.5\xD7 over TVM. Moreover, our technique\ - \ completes in seconds instead of hours, making it possible to include it as\ - \ a new backend for PyTorch itself." 
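The B3 poster above frames schedule selection as predicting the performance of a partial schedule with an LSTM over engineered per-operator features. The following is only a generic sketch of that idea; the feature size, hidden size, and regression head are arbitrary choices and this is not the authors' model:

```python
# Generic sketch: score candidate (partial) schedules with an LSTM over per-operator features.
import torch
import torch.nn as nn

class ScheduleCostModel(nn.Module):
    def __init__(self, feature_dim: int = 32, hidden_dim: int = 128):
        super().__init__()
        self.lstm = nn.LSTM(feature_dim, hidden_dim, batch_first=True)
        self.head = nn.Linear(hidden_dim, 1)  # predicted runtime/score for the schedule

    def forward(self, schedule_features: torch.Tensor) -> torch.Tensor:
        # schedule_features: (batch, num_operators, feature_dim)
        _, (h_n, _) = self.lstm(schedule_features)
        return self.head(h_n[-1]).squeeze(-1)

model = ScheduleCostModel()
scores = model(torch.randn(4, 10, 32))  # 4 candidate schedules, 10 operators each
```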
- link: http://facebook.ai - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/B3.png - section: B3 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-B3.png - title: RL Based Performance Optimization of Deep Neural Networks -- authors: - - Zhenghong Liu - categories: - - NLP & Multimodal, RL & Time Series - description: - Forte is an open-source toolkit for building Natural Language Processing - workflows via assembling state-of-the-art NLP and ML technologies. This toolkit - features composable pipeline, cross-task interaction, adaptable data-model interfaces. - The highly composable design allows users to build complex NLP pipelines of a - wide range of tasks including document retrieval, information extraction, and - text generation by combining existing toolkits or customized PyTorch models. The - cross-task interaction ability allows developers to utilize the results from individual - tasks to make informed decisions. The data-model interface helps developers to - focus on building reusable PyTorch models by abstracting out domain and preprocessing - details. We show that Forte can be used to build complex pipelines, and the resulting - pipeline can be easily adapted to different domains and tasks with small changes - in the code. - link: https://github.com/asyml/forte - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/C1.png - section: C1 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-C1.png - title: A Data-Centric Framework for Composable NLP -- authors: - - Shagun Sodhani - - Amy Zhang - - Ludovic Denoyer - - Pierre-Alexandre Kamienny - - Olivier Delalleau - categories: - - NLP & Multimodal, RL & Time Series - description: - "The two key components in a multi-task RL codebase are (i) Multi-task - RL algorithms and (ii) Multi-task RL environments. We develop open-source libraries - for both components. [MTRL](https://github.com/facebookresearch/mtrl) provides - components to implement multi-task RL algorithms, and [MTEnv](https://github.com/facebookresearch/mtenv) - is a library to interface with existing multi-task RL environments and create - new ones. - - - MTRL has two building blocks: (i) single task policy and (ii) components to augment - the single-task policy for multi-task setup. The ideal workflow is to start with - a base policy and add multi-task components as they seem fit. MTRL enables algorithms - like GradNorm, Distral, HiPBMDP, PCGrad, Soft Modularization, etc. - - - MTEnv is an effort to standardize multi-task RL environments and provide better - benchmarks. We extend the Gym API to support multiple tasks, with two guiding - principles: (i) Make minimal changes to the Gym Interface (which the community - is very familiar with) and (ii) Make it easy to port existing environments to - MTEnv. Additionally, we provide a collection of commonly used multi-task RL environments - (Acrobot, Cartpole, Multitask variant of DeepMind Control Suite, Meta-World, Multi-armed - Bandit, etc.). The RL practitioner can combine its own environments with the MTEnv - wrappers to add multi-task support with a small code change. - - - MTRL and MTEnv are used in several ongoing/published works at FAIR." 
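The MTEnv description above centers on extending the Gym API with task information while keeping the interface familiar. A hypothetical wrapper along those lines follows; this is not MTEnv's actual interface, only an illustration of the idea:

```python
# Hypothetical multi-task wrapper: attach a task identifier to every observation.
import gym

class MultiTaskWrapper(gym.Wrapper):
    def __init__(self, env: gym.Env, task_id: int):
        super().__init__(env)
        self.task_id = task_id

    def reset(self, **kwargs):
        obs = self.env.reset(**kwargs)
        return {"env_obs": obs, "task_obs": self.task_id}

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        return {"env_obs": obs, "task_obs": self.task_id}, reward, done, info

envs = [MultiTaskWrapper(gym.make("CartPole-v1"), task_id=i) for i in range(2)]
```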
- link: http://qr.w69b.com/g/tGZSFw33G - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/C2.png - section: C2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-C2.png - title: Environments and Baselines for Multitask Reinforcement Learning -- authors: - - Lysandre Debut - - Sylvain Gugger - - "Quentin Lhoest\_" - categories: - - NLP & Multimodal, RL & Time Series - description: - "Transfer learning has become the norm to get state-of-the-art results - in NLP. Hugging Face provides you with tools to help you on every step along the - way: - - - - A free git-based shared hub with more than 7,500 PyTorch checkpoints, and more - than 800 NLP datasets. - - - The 🤗 Datasets library, to easily download the dataset, manipulate it and prepare - it. - - - The 🤗 Tokenizers library, which provides ultra-fast tokenizers backed by Rust, - and converts text into PyTorch tensors. - - - The 🤗 Transformers library, providing more than 45 PyTorch implementations of - Transformer architectures as simple nn.Module as well as a training API. - - - The 🤗 Accelerate library, a non-intrusive API that allows you to run your raw - training loop on any distributed setup. - - - The pipeline is then simply a six-step process: select a pretrained model from - the hub, handle the data with Datasets, tokenize the text with Tokenizers, load - the model with Transformers, train it with the Trainer or your own loop powered - by Accelerate, before sharing your results with the community on the hub." - link: https://huggingface.co/models - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/C3.png - section: C3 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-C3.png - title: The Hugging Face Ecosystem -- authors: - - Manuel Pariente - - Samuele Cornell - - Jonas Haag - - Joris Cosentino - - Michel Olvera - - "Fabian-Robert St\xF6ter" - - Efthymios Tzinis - categories: - - NLP & Multimodal, RL & Time Series - description: - Asteroid is an audio source separation toolkit built with PyTorch and - PyTorch-Lightning. Inspired by the most successful neural source separation systems, - it provides all neural building blocks required to build such a system. To improve - reproducibility, recipes on common audio source separation datasets are provided, - including all the steps from data download/preparation through training to evaluation - as well as many current state-of-the-art DNN models. Asteroid exposes all levels - of granularity to the user from simple layers to complete ready-to-use models. - Our pretrained models are hosted on the asteroid-models community in Zenodo and - on the Huggingface model Hub.
Loading and using pretrained models is trivial and - sharing them is also made easy with asteroid's CLI. - link: https://asteroid-team.github.io/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/D1.png - section: D1 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-D1.png - title: "\_Asteroid: the Pytorch-based Audio Source Separation Toolkit for Researchers" -- authors: - - Ludovic Denoyer - - Danielle Rothermel - - Xavier Martinet - categories: - - NLP & Multimodal, RL & Time Series - description: - RLStructures is a lightweight Python library that provides simple APIs - as well as data structures that make as few assumptions as possible about the - structure of your agent or your task, while allowing for transparently executing - multiple policies on multiple environments in parallel (incl. multiple GPUs). - It thus facilitates the implementation of RL algorithms while avoiding complex - abstractions. - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/D2.png - section: D2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-D2.png - title: "rlstructures: A Lightweight Python Library for Reinforcement Learning Research" -- authors: - - Luis Pineda - - Brandon Amos - - Amy Zhang - - Nathan O. Lambert - - Roberto Calandra - categories: - - NLP & Multimodal, RL & Time Series - description: - Model-based reinforcement learning (MBRL) is an active area of research - with enormous potential. In contrast to model-free RL, MBRL algorithms solve tasks - by learning a predictive model of the task dynamics, and use this model to predict - the future and facilitate decision making. Many researchers have argued that MBRL - can result in lower sample complexity, better generalization, as well as safer - and more interpretable decisions. However, despite the surge in popularity and - great potential of MBRL, there is currently no widely accepted library for facilitating - research in this area. Since MBRL methods often involve the interplay of complex - components such as probabilistic ensembles, latent variable models, planning algorithms, - and even model-free methods, the lack of such a library raises the entry bar to - the field and slows down research efforts. In this work we aim to solve this problem - by introducing MBRL-Lib, a modular PyTorch toolbox specifically designed for facilitating - research on model-based reinforcement learning. MBRL-Lib provides interchangeable - options for training dynamics models and running planning algorithms, which can - then be used in a mix and match fashion to create novel MBRL methods. The library - also provides a set of utility functions to run common MBRL tasks, as well as a set - of diagnostics tools to identify potential issues while training dynamics models - and control algorithms.
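For readers unfamiliar with the workflow MBRL-Lib (D3) modularizes, a bare-bones sketch of model-based RL follows: fit a dynamics model on real transitions, then plan by querying it. The shapes and the random-shooting planner are illustrative only and do not reflect MBRL-Lib's API:

```python
# Generic model-based RL sketch: learn p(s' | s, a), then pick actions by rolling the model forward.
import torch
import torch.nn as nn

obs_dim, act_dim = 4, 1
dynamics = nn.Sequential(nn.Linear(obs_dim + act_dim, 64), nn.ReLU(), nn.Linear(64, obs_dim))

def plan(state: torch.Tensor, reward_fn, num_candidates: int = 64) -> torch.Tensor:
    """Pick the candidate action whose predicted next state scores best under reward_fn."""
    actions = torch.rand(num_candidates, act_dim) * 2 - 1          # random shooting in [-1, 1]
    inputs = torch.cat([state.expand(num_candidates, -1), actions], dim=-1)
    return actions[reward_fn(dynamics(inputs)).argmax()]

# Training the dynamics model would minimize ((dynamics(cat([s, a])) - s_next) ** 2).mean()
# over a replay buffer of real transitions; e.g.:
action = plan(torch.zeros(obs_dim), lambda s: -s.pow(2).sum(-1))
```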
- link: https://github.com/facebookresearch/mbrl-lib - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/D3.png - section: D3 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-D3.png - title: "MBRL-Lib: a PyTorch toolbox for model-based reinforcement learning research" -- authors: - - Geeta Chauhan - - Gisle Dankel - - Elena Neroslavaskaya - categories: - - Performance & Profiler - description: - Analyzing and improving large-scale deep learning model performance - is an ongoing challenge that continues to grow in importance as the model sizes - increase. Microsoft and Facebook collaborated to create a native PyTorch performance - debugging tool called PyTorch Profiler. The profiler builds on the PyTorch autograd - profiler foundation, adds a new high fidelity GPU profiling engine, and out-of-the-box - bottleneck analysis tool in Tensorboard. New Profiler delivers the simplest experience - available to date where users can profile their models without installing any - additional packages and see results immediately in Tensorboard. Until today, beginner - users of PyTorch may not have attempted to profile their models due to the task - complexity. With the new bottleneck analysis tool, they will find profiling easy - and accessible. Experienced users will be delighted by the detailed trace views - which illustrate GPU kernel execution events and their relationship to the PyTorch - operations. Come learn how to profile your PyTorch models using this new delightfully - simple tool. - link: https://pytorch.org/blog/introducing-pytorch-profiler-the-new-and-improved-performance-tool - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/H6.png - section: H6 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-H6.png - title: Introducing New PyTorch Profiler -- authors: - - Naren Dasan - categories: - - Performance & Profiler - description: - For experimentation and the development of machine learning models, - few tools are as approachable as PyTorch. However, when moving from research to - production, some of the features that make PyTorch great for development make - it hard to deploy. With the introduction of TorchScript, PyTorch has solid tooling - for addressing some of the problems of deploying PyTorch models. TorchScript removes - the dependency on Python and produces portable, self contained, static representations - of code and weights. But in addition to portability, users also look to optimize - performance in deployment. When deploying on NVIDIA GPUs, TensorRT, NVIDIA's deep - learning optimizer, provides the capability to maximize performance of workloads - by tuning the execution of models for specific target hardware. TensorRT also - provides tooling for conducting further optimization through mixed and reduced - precision execution and post training quantization (PTQ). We present TRTorch, - a compiler for PyTorch and TorchScript targeting NVIDIA GPUs, which combines the - usability of PyTorch with the performance of TensorRT and allows users to fully - optimize their inference workloads without leaving the PyTorch ecosystem. It also - simplifies conducting complex optimizations like PTQ by leveraging common PyTorch - tooling. TRTorch can be used directly from PyTorch as a TorchScript Backend, embedded - in an application or used from the command line to easily increase the performance - of inference applications. 
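The PyTorch Profiler entry (H6) above mentions the TensorBoard-based workflow; a minimal sketch with the torch.profiler API looks roughly like this (the model, input, and ./log directory are stand-ins):

```python
# Minimal torch.profiler usage; the trace lands in ./log for the TensorBoard plugin.
import torch
from torch.profiler import profile, ProfilerActivity, schedule, tensorboard_trace_handler

model = torch.nn.Linear(128, 10)
inputs = torch.randn(32, 128)

with profile(
    activities=[ProfilerActivity.CPU],            # add ProfilerActivity.CUDA on GPU runs
    schedule=schedule(wait=1, warmup=1, active=3),
    on_trace_ready=tensorboard_trace_handler("./log"),
) as prof:
    for _ in range(5):
        model(inputs)
        prof.step()                               # marks a profiling step boundary
```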
- link: https://nvidia.github.io/TRTorch/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/I6.png - section: I6 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-I6.png - title: "TRTorch: A Compiler for TorchScript Targeting NVIDIA GPUs with TensorRT" -- authors: - - Charles H. Martin - categories: - - Performance & Profiler - description: - "WeightWatcher (WW) is an open-source, diagnostic tool for analyzing\ - \ Deep Neural Networks (DNN), without needing access to training or even test\ - \ data. It can be used to: analyze pre/trained pyTorch models; \ninspect models\ - \ that are difficult to train; gauge improvements in model performance; predict\ - \ test accuracies across different models; and detect potential problems when\ - \ compressing or fine-tuning pretrained models.\n\nWeightWatcher is based on theoretical\ - \ research (done in\\-joint with UC Berkeley) into \"Why Deep Learning Works\"\ - , using ideas from Random Matrix Theory (RMT), Statistical Mechanics, and Strongly\ - \ Correlated Systems." - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/J6.png - section: J6 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-J6.png - title: "WeightWatcher: A Diagnostic Tool for DNNs" -- authors: - - Mario Lezcano-Casado - categories: - - Performance & Profiler - description: - '"This poster presents the ""parametrizations"" feature that will be - added to PyTorch in 1.9.0. - - This feature allows for a simple implementation of methods like pruning, weight_normalization - or spectral_normalization. - - More generally, it implements a way to have ""computed parameters"". This means - that we replace a parameter `weight` in a layer with `f(weight)`, where `f` is - an arbitrary module. In other words, after putting a parametrization `f` on `layer.weight`, - `layer.weight` will return `f(weight)`. - - They implement a caching system, so that the value `f(weight)` is computed just - once during the forward pass. - - A module that implements a parametrisation may also have a `right_inverse` method. - If this method is present, it is possible to assign to a parametrised tensor. - This is useful when initialising a parametrised tensor. - - This feature can be seen as a first step towards invertible modules. In particular, - it may also help making distributions first-class citizens in PyTorch. - - Parametrisations also allows for a simple implementation of constrained optimisation. - From this perspective, parametrisation maps an unconstrained tensor to a constrained - space such as the space of orthogonal matrices, SPD matrices, low-rank matrices... - This approach is implemented in the library GeoTorch (https://github.com/Lezcano/geotorch/)."' - link: "" - section: K6 - title: Constrained Optimization in PyTorch 1.9 Through Parametrizations -- authors: - - Richard Liaw - - Kai Fricke - - Amog Kamsetty - - Michael Galarnyk - categories: - - Platforms & Ops & Tools - description: - Ray is a popular framework for distributed Python that can be paired - with PyTorch to rapidly scale machine learning applications. Ray contains a large - ecosystem of applications and libraries that leverage and integrate with Pytorch. - This includes Ray Tune, a Python library for experiment execution and hyperparameter - tuning at any scale; RLlib, a state-of-the-art library for reinforcement learning; - and Ray Serve, a library for scalable model serving. 
Together, Ray and Pytorch - are becoming the core foundation for the next generation of production machine - learning platforms. - link: https://ray.io/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/H1.png - section: H1 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-H1.png - title: Distributed Pytorch with Ray -- authors: - - Vincenzo Lomonaco - - Lorenzo Pellegrini Andrea Cossu - - Antonio Carta - - Gabriele Graffieti - categories: - - Platforms & Ops & Tools - description: - Learning continually from non-stationary data stream is a long sought - goal of machine learning research. Recently, we have witnessed a renewed and fast-growing - interest in Continual Learning, especially within the deep learning community. - However, algorithmic solutions are often difficult to re-implement, evaluate and - port across different settings, where even results on standard benchmarks are - hard to reproduce. In this work, we propose an open-source, end-to-end library - for continual learning based on PyTorch that may provide a shared and collaborative - code-base for fast prototyping, training and reproducible evaluation of continual - learning algorithms. - link: https://avalanche.continualai.org - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/H2.png - section: H2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-H2.png - title: "Avalanche: an End-to-End Library for Continual Learning based on PyTorch" -- authors: - - Hong Xu - categories: - - Platforms & Ops & Tools - description: - IBM Z is a hardware product line for mission-critical applications, - such as finance and health applications. It employs its own CPU architecture, - which PyTorch does not officially support. In this poster, we discuss why it is - important to support PyTorch on Z. Then, we show our prebuilt minimal PyTorch - package for IBM Z. Finally, we demonstrate our continuing commitment to make more - PyTorch features available on IBM Z. - link: https://codait.github.io/pytorch-on-z - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/H3.png - section: H3 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-H3.png - title: PyTorch on IBM Z and LinuxONE (s390x) -- authors: - - Dr. Ariel Biller - categories: - - Platforms & Ops & Tools - description: - "Both from sanity considerations and the productivity perspective,\ - \ Data Scientists, ML engineers, Graduate students, and other research-facing\ - \ roles are all starting to adopt best-practices from production-grade MLOps.\n\ - \nHowever, most toolchains come with a hefty price of extra code and maintenance,\ - \ which reduces the actual time available for R&D. We will show an alternative\ - \ approach using ClearML, the open-source MLOps solution.\n\nIn this \"best-practices\"\ - \ poster, we will overview the \"must-haves\" of R&D-MLOPs: \nOrchestration, Automation,\ - \ and Reproducibility. These enable easy remote execution through magically reproducible\ - \ setups and even custom, reusable, bottom-up pipelines.\n\nWe will take a single\ - \ example and schematically transform it from the \"as downloaded from GitHub\"\ - \ stage to a fully-fledged, scalable, version-controlled, parameterizable R&D\ - \ pipeline. We will measure the number of changes needed to the codebase and provide\ - \ evidence of real low-cost integration. 
All code, logs, and metrics will be available\ - \ as supporting information." - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/I1.png - section: I1 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-I1.png - title: "The Fundamentals of MLOps for R&D: Orchestration, Automation, Reproducibility" -- authors: - - Masashi Sode - - Akihiko Fukuchi - - Yoki Yabe - - Yasufumi Nakata - categories: - - Platforms & Ops & Tools - description: - "Is your machine learning model fair enough to be used in your system?\ - \ What if a recruiting AI discriminates on gender and race? What if the accuracy\ - \ of medical AI depends on a person's annual income or on the GDP of the country\ - \ where it is used? Today's AI has the potential to cause such problems. In recent\ - \ years, fairness in machine learning has received increasing attention. If current\ - \ machine learning models used for decision making may cause unfair discrimination,\ - \ developing a fair machine learning model is an important goal in many areas,\ - \ such as medicine, employment, and politics. Despite the importance of this goal\ - \ to society, as of 2020, there was no PyTorch\xB9 project incorporating fairness\ - \ into a machine learning model. To solve this problem, we created FairTorch at\ - \ the PyTorch Summer Hackathon 2020.\n\nFairTorch provides a tool to mitigate\ - \ the unfairness of machine learning models. A unique feature of our tool is that\ - \ it allows you to add a fairness constraint to your model by adding only a few\ - \ lines of code, using the fairness criteria provided in the library." - link: https://github.com/wbawakate/fairtorch - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/I2.png - section: I2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-I2.png - title: "FairTorch: Aspiring to Mitigate the Unfairness of Machine Learning Models" -- authors: - - Thomas Viehmann - - Luca Antiga - categories: - - Platforms & Ops & Tools - description: - "When machine learning models are deployed to solve a given task, a - crucial question is whether they are actually able to perform as expected. TorchDrift - addresses one aspect of the answer, namely drift detection, or whether the information - flowing through our models - either probed at the input, output or somewhere in-between - - is still consistent with the one it was trained and evaluated on. In a nutshell, - TorchDrift is designed to be plugged into PyTorch models and check whether they - are operating within spec. - - TorchDrift's principles apply PyTorch's motto _from research to production_ - to drift detection: We provide a library of methods that canbe used as baselines - or building blocks for drift detection research, as well as provide practitioners - deploying PyTorch models in production with up-to-date methods and educational - material for building the necessary statistical background. Here we introduce - TorchDrift with an example illustrating the underlying two-sample tests. We show - how TorchDrift can be integrated in high-performance runtimes such as TorchServe - or RedisAI, to enable drift detection in real-world applications thanks to the - PyTorch JIT." 
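The TorchDrift entry (I3) above builds on two-sample tests between training-time features and the features seen in production. As a generic illustration of that kind of check (a plain Gaussian-kernel MMD statistic, not TorchDrift's API):

```python
# Generic drift check: compare reference features against production features with an MMD statistic.
import torch

def mmd(x: torch.Tensor, y: torch.Tensor, sigma: float = 1.0) -> torch.Tensor:
    """Biased estimate of the squared maximum mean discrepancy between two feature batches."""
    def k(a, b):
        return torch.exp(-torch.cdist(a, b).pow(2) / (2 * sigma**2))
    return k(x, x).mean() + k(y, y).mean() - 2 * k(x, y).mean()

reference = torch.randn(256, 32)          # features from training/validation data
production = torch.randn(256, 32) + 0.5   # shifted features -> larger drift score
print(mmd(reference, production))
```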
- link: https://torchdrift.org/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/I3.png - section: I3 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-I3.png - title: "TorchDrift: Drift Detection for PyTorch" -- authors: - - Quincy Chen - - Arjun Bhargava - - Sudeep Pillai - - Marcus Pan - - Chao Fang - - Chris Ochoa - - Adrien Gaidon - - Kuan-Hui Lee - - Wolfram Burgard - categories: - - Platforms & Ops & Tools - description: - "Modern machine learning for autonomous vehicles requires a fundamentally\ - \ different infrastructure and production lifecycle from their standard software\ - \ continuous-integration/continuous-deployment counterparts. At Toyota Research\ - \ Institute (TRI), we have developed \u200BOuroboros\u200B - a modern ML platform\ - \ that supports the end-to-end lifecycle of all ML models delivered to TRI's autonomous\ - \ vehicle fleets. We envision that all ML models delivered to our fleet undergo\ - \ a systematic and rigorous treatment. Ouroboros delivers several essential features\ - \ including:\na. ML dataset governance and infrastructure-as-code\u200B that ensures\ - \ the traceability, reproducibility, standardization, and fairness for all ML\ - \ datasets and models procedurally generated and delivered to the TRI fleet.\n\ - b. Unified ML dataset and model management:\u200B An unified and streamlined workflow\ - \ for ML dataset curation, label management, and model development that supports\ - \ several key ML models delivered to the TRI fleet today\nc. A Large-scale Multi-task,\ - \ Multi-modal Dataset for Automated Driving\u200B that supports the development\ - \ of various models today, including 3D object detection, 2D object detection,\ - \ 2D BeVFlow, Panoptic Segmentation;\nd. Orchestrated ML workflows\u200B to stand\ - \ up scalable ML applications such as push-button re-training solutions, ML CI/CDs\ - \ pipelines, Dataset Curation workflows, Auto-labelling pipelines, leveraging\ - \ the most up-to-date cloud tools available. along their lifecycles, ensuring\ - \ strong governance on building reusable, reproducible, robust, traceable, and\ - \ fair ML models for the production driving setting. By following the best MLOps\ - \ practices, we expect our platform to lay the foundation for continuous life-long\ - \ learning in our autonomous vehicle fleets and accelerate the transition from\ - \ research to production." - link: https://github.com/TRI-ML - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/J1.png - section: J1 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-J1.png - title: "Ouroboros: MLOps for Automated Driving" -- authors: - - Yujian He - categories: - - Platforms & Ops & Tools - description: - carefree-learn makes PyTorch accessible to people who are familiar - with machine learning but not necessarily PyTorch. By having already implemented - all the pre-processing and post-processing under the hood, users can focus on - implementing the core machine learning algorithms / models with PyTorch and test - them on various datasets. By having designed the whole structure carefully, users - can easily customize every block in the whole pipeline, and can also 'combine' - the implemented blocks to 'construct' new models without efforts. 
By having carefully - made abstractions users can adapt it to their specific down-stream tasks, such - as quantitative trading (in fact I've already implemented one for my company and - it works pretty well XD). carefree-learn handles distributed training carefully, - so users can either run multiple tasks at the same time, or run a huge model with - DDP in one line of code. carefree-learn also integrates with mlflow and supports - exporting to ONNX, which means it is ready for production to some extend. - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/J2.png - section: J2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-J2.png - title: "carefree-learn: Tabular Datasets \u2764\uFE0F PyTorch" -- authors: - - Wenwei Zhang - categories: - - Platforms & Ops & Tools - description: - "OpenMMLab project builds open-source toolboxes for Artificial Intelligence - (AI). It aims to 1) provide high-quality codebases to reduce the difficulties - in algorithm reimplementation; 2) provide a complete research platform to accelerate - the research production; and 3) shorten the gap between research production to - the industrial applications. Based on PyTorch, OpenMMLab develops MMCV to provide - unified abstract training APIs and common utils, which serves as a foundation - of 15+ toolboxes and 40+ datasets. - - - Since the initial release in October 2018, OpenMMLab has released 15+ toolboxes - that cover 10+ directions, implement 100+ algorithms, and contain 1000+ pre-trained - models. With a tighter collaboration with the community, OpenMMLab will release - more toolboxes with more flexible and easy-to-use training frameworks in the future." - link: https://openmmlab.com/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/J3.png - section: J3 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-J3.png - title: "OpenMMLab: An Open-Source Algorithm Platform for Computer Vision" -- authors: - - Sergey Kolesnikov - categories: - - Platforms & Ops & Tools - description: - "For the last three years, Catalyst-Team and collaborators have been\ - \ working on Catalyst\u200A - a high-level PyTorch framework Deep Learning Research\ - \ and Development. It focuses on reproducibility, rapid experimentation, and codebase\ - \ reuse so you can create something new rather than write yet another train loop.\ - \ You get metrics, model checkpointing, advanced logging, and distributed training\ - \ support without the boilerplate and low-level bugs." - link: https://catalyst-team.com - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/K2.png - section: K2 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-K2.png - title: "Catalyst \u2013 Accelerated deep learning R&D" -- authors: - - Anton Obukhov - categories: - - Platforms & Ops & Tools - description: - "Evaluation of generative models such as GANs is an important part\ - \ of deep learning research. In 2D image generation, three approaches became widely\ - \ spread: Inception Score, Fr\xE9chet Inception Distance, and Kernel Inception\ - \ Distance. Despite having a clear mathematical and algorithmic description, these\ - \ metrics were initially implemented in TensorFlow and inherited a few properties\ - \ of the framework itself, such as a specific implementation of the interpolation\ - \ function. 
These design decisions were effectively baked into the evaluation\ - \ protocol and became an inherent part of the specification of the metrics. As\ - \ a result, researchers wishing to compare against state of the art in generative\ - \ modeling are forced to perform an evaluation using the original metric authors'\ - \ codebases. Reimplementations of metrics in PyTorch and other frameworks exist,\ - \ but they do not provide a proper level of fidelity, thus making them unsuitable\ - \ for reporting results and comparing them to other methods. This software aims\ - \ to provide epsilon-exact implementations of the said metrics in PyTorch and\ - \ remove inconveniences associated with generative model evaluation and development.\ - \ All the evaluation pipeline steps are correctly tested, with relative errors\ - \ and sources of remaining non-determinism summarized in sections below.\nTLDR;\ - \ fast and reliable GAN evaluation in PyTorch" - link: https://github.com/toshas/torch-fidelity - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/K3.png - section: K3 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-K3.png - title: High-fidelity performance metrics for generative models in PyTorch -- authors: - - Jona Raphael (jona@skytruth.org) - - Ben Eggleston - - Ryan Covington - - Tatianna Evanisko - - John Amos - categories: - - Vision - description: - "Operational oil discharges from ships, also known as \"bilge dumping,\"\ - \ have been identified as a major source of petroleum products entering our oceans,\ - \ cumulatively exceeding the largest oil spills, such as the Exxon Valdez and\ - \ Deepwater Horizon spills, even when considered over short time spans. However,\ - \ we still don't have a good estimate of\n\u25CF How much oil is being discharged;\n\ - \u25CF Where the discharge is happening;\n\u25CF Who the responsible vessels are.\n\ - This makes it difficult to prevent and effectively respond to oil pollution that\ - \ can damage our marine and coastal environments and economies that depend on\ - \ them.\n\nIn this poster we will share SkyTruth's recent work to address these\ - \ gaps using machine learning tools to detect oil pollution events and identify\ - \ the responsible vessels when possible. We use a convolutional neural network\ - \ (CNN) in a ResNet-34 architecture to perform pixel segmentation on all incoming\ - \ Sentinel-1 synthetic aperture radar (SAR) imagery to classify slicks. Despite\ - \ the satellites' incomplete oceanic coverage, we have been detecting an average\ - \ of 135 vessel slicks per month, and have identified several geographic hotspots\ - \ where oily discharges are occurring regularly. For the images that capture a\ - \ vessel in the act of discharging oil, we rely on an Automatic Identification\ - \ System (AIS) database to extract details about the ships, including vessel type\ - \ and flag state. We will share our experience\n\u25CF Making sufficient training\ - \ data from inherently sparse satellite image datasets;\n\u25CF Building a computer\ - \ vision model using PyTorch and fastai;\n\u25CF Fully automating the process\ - \ in the Amazon Web Services (AWS) cloud.\nThe application has been running continuously\ - \ since August 2020, has processed over 380,000 Sentinel-1 images, and has populated\ - \ a database with more than 1100 high-confidence slicks from vessels. 
We will\ - \ be discussing preliminary results from this dataset and remaining challenges\ - \ to be overcome.\nLearn more at https://skytruth.org/bilge-dumping/" - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/A6.png - section: A6 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-A6.png - title: Using Satellite Imagery to Identify Oceanic Oil Pollution -- authors: - - Tanishq Abraham - categories: - - Vision - description: - Unpaired image-to-image translation algorithms have been used for various - computer vision tasks like style transfer and domain adaption. Such algorithms - are highly attractive because they alleviate the need for the collection of paired - datasets. In this poster, we demonstrate UPIT, a novel fastai/PyTorch package - (built with nbdev) for unpaired image-to-image translation. It implements various - state-of-the-art unpaired image-to-image translation algorithms such as CycleGAN, - DualGAN, UNIT, and more. It enables simple training and inference on unpaired - datasets. It also comes with implementations of commonly used metrics like FID, - KID, and LPIPS. It also comes with Weights-and-Biases integration for easy experiment - tracking. Since it is built on top of fastai and PyTorch, it comes with support - for mixed-precision and multi-GPU training. It is highly flexible, and custom - dataset types, models, and metrics can be used as well. With UPIT, training and - applying unpaired image-to-image translation only takes a few lines of code. - link: https://github.com/tmabraham/UPIT - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/A7.png - section: A7 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-A7.png - title: "UPIT: A fastai Package for Unpaired Image-to-Image Translation" -- authors: - - Aaron Adcock - - Bo Xiong - - Christoph Feichtenhofer - - Haoqi Fan - - Heng Wang - - Kalyan Vasudev Alwala - - Matt Feiszli - - Tullie Murrell - - Wan-Yen Lo - - Yanghao Li - - Yilei Li - - "Zhicheng Yan " - categories: - - Vision - description: - PyTorchVideo is the new Facebook AI deep learning library for video - understanding research. It contains variety of state of the art pretrained video - models, dataset, augmentation, tools for video understanding. PyTorchVideo provides - efficient video components on accelerated inference on mobile device. - link: https://pytorchvideo.org/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/A8.png - section: A8 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-A8.png - title: "PyTorchVideo: A Deep Learning Library for Video Understanding" -- authors: - - A. Speiser - - "L-R. M\xFCller" - - P. Hoess - - U. Matti - - C. J. Obara - - J. H. Macke - - J. Ries - - S. C. Turaga - categories: - - Vision - description: - Single-molecule localization microscopy (SMLM) has had remarkable success - in imaging cellular structures with nanometer resolution, but the need for activating - only single isolated emitters limits imaging speed and labeling density. Here, - we overcome this major limitation using deep learning. We developed DECODE, a - computational tool that can localize single emitters at high density in 3D with - the highest accuracy for a large range of imaging modalities and conditions. 
In - a public software benchmark competition, it outperformed all other fitters on - 12 out of 12 datasets when comparing both detection accuracy and localization - error, often by a substantial margin. DECODE allowed us to take live-cell SMLM - data with reduced light exposure in just 3 seconds and to image microtubules at - ultra-high labeling density. Packaged for simple installation and use, DECODE - will enable many labs to reduce imaging times and increase localization density - in SMLM. - link: http://github.com/turagalab/decode - section: B6 - title: - Deep Learning Enables Fast and Dense Single-Molecule Localization with High - Accuracy -- authors: - - "Abraham S\xE1nchez" - - Guillermo Mendoza - - "E. Ulises Moya-S\xE1nchez" - categories: - - Vision - description: - "We draw inspiration from the cortical area V1. We try to mimic its - main processing properties by means of: quaternion local phase/orientation to - compute lines and edges detection in a specific direction. We analyze how this - layer is robust by its geometry to large illumination and brightness changes." - link: https://gitlab.com/ab.sanchezperez/pytorch-monogenic - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/B7.png - section: B7 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-B7.png - title: A Robust PyTorch Trainable Entry Convnet Layer in Fourier Domain -- authors: - - "Fran\xE7ois-Guillaume Fernandez" - - Mateo Lostanlen - - Sebastien Elmaleh - - Bruno Lenzi - - Felix Veith - - and more than 15+ contributors - categories: - - Vision - description: - '"PyroNear is a non-profit organization composed solely of volunteers - which was created in late 2019. Our core belief is that recent technological developments - can support the cohabitation between mankind & its natural habitat. We strive - towards high-performing, accessible & affordable tech-solutions for protection - against natural hazards. More specifically, our first efforts are focused on wildfire - protection by increasing the coverage of automatic detection systems. - - - Our ongoing initiative has now gathered dozens of volunteers to put up the following - main contributions: - - - Computer Vision: compiling open-source models and datasets (soon to be published) - for vision tasks related to wildfire detection - - - Edge Computing: developing an affordable physical prototype running our PyTorch - model on a Raspberry Pi - - - End-to-end detection workflow: building a responsible end-to-end system for - large scale detection and alert management (API, front-end monitoring platform) - - - Deployment: working with French firefighter departments to gather field knowledge - and conduct a test phase over the incoming European summer."'
- link: https://github.com/pyronear - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/B8.png - section: B8 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-B8.png - title: "PyroNear: Embedded Deep Learning for Early Wildfire Detection" -- authors: - - Nikhila Ravi - - Jeremy Reizenstein - - David Novotny - - Justin Johnson - - Georgia Gkioxari - - Roman Shapovalov - - Patrick Labatut - - Wan-Yen Lo - categories: - - Vision - description: - "PyTorch3D is a modular and optimized library for 3D Deep Learning - with PyTorch. It includes support for: data structures for heterogeneous batching - of 3D data (Meshes, Point clouds and Volumes), optimized 3D operators and loss - functions (with custom CUDA kernels), a modular differentiable rendering API for - Meshes, Point clouds and Implicit functions, as well as several other tools for - 3D Deep Learning." - link: https://arxiv.org/abs/2007.08501 - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/C6.png - section: C6 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-C6.png - title: "PyTorch3D: Fast, Flexible, 3D Deep Learning " -- authors: - - E. Riba - - J. Shi - - D. Mishkin - - L. Ferraz - - A. Nicolao - categories: - - Vision - description: - This work presents Kornia, an open source computer vision library built - upon a set of differentiable routines and modules that aims to solve generic computer - vision problems. The package uses PyTorch as its main backend, not only for efficiency - but also to take advantage of the reverse auto-differentiation engine to define - and compute the gradient of complex functions. Inspired by OpenCV, Kornia is composed - of a set of modules containing operators that can be integrated into neural networks - to train models to perform a wide range of operations including image transformations,camera - calibration, epipolar geometry, and low level image processing techniques, such - as filtering and edge detection that operate directly on high dimensional tensor - representations on graphical processing units, generating faster systems. Examples - of classical vision problems implemented using our framework are provided including - a benchmark comparing to existing vision libraries. - link: http://www.kornia.org - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/C7.png - section: C7 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-C7.png - title: "Kornia: an Open Source Differentiable Computer Vision Library for PyTorch" -- authors: - - Thomas George - categories: - - Vision - description: - Fisher Information Matrices (FIM) and Neural Tangent Kernels (NTK) - are useful tools in a number of diverse applications related to neural networks. - Yet these theoretical tools are often difficult to implement using current libraries - for practical size networks, given that they require per-example gradients, and - a large amount of memory since they scale as the number of parameters (for the - FIM) or the number of examples x cardinality of the output space (for the NTK). - NNGeometry is a PyTorch library that offers a high level API for computing various - linear algebra operations such as matrix-vector products, trace, frobenius norm, - and so on, where the matrix is either the FIM or the NTK, leveraging recent advances - in approximating these matrices. 
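For reference, the two objects the NNGeometry description above works with are conventionally defined as below; these are the standard definitions, stated only to make the size claims in the description (number of parameters for the FIM, examples times outputs for the NTK) concrete:

```latex
% Fisher Information Matrix: P x P for a network with P parameters
F(\theta) = \mathbb{E}_{x \sim \mathcal{D},\, y \sim p_\theta(y \mid x)}
  \left[ \nabla_\theta \log p_\theta(y \mid x)\, \nabla_\theta \log p_\theta(y \mid x)^\top \right]

% Neural Tangent Kernel: (N \cdot O) x (N \cdot O) for N examples and O outputs,
% with J_\theta(x) the Jacobian of the network outputs w.r.t. the parameters
K_\theta(x, x') = J_\theta(x)\, J_\theta(x')^\top
```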
- link: https://github.com/tfjgeorge/nngeometry/ - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/C8.png - section: C8 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-C8.png - title: - "NNGeometry: Easy and Fast Fisher Information Matrices and Neural Tangent - Kernels in PyTorch" -- authors: - - "B\xE9gaint J." - - "Racap\xE9 F." - - Feltman S. - - Pushparaja A. - categories: - - Vision - description: - CompressAI is a PyTorch library that provides custom operations, layers, - modules and tools to research, develop and evaluate end-to-end image and video - compression codecs. In particular, CompressAI includes pre-trained models and - evaluation tools to compare learned methods with traditional codecs. State-of-the-art - end-to-end compression models have been reimplemented in PyTorch and trained from - scratch, reproducing published results and allowing further research in the domain. - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/D6.png - section: D6 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-D6.png - title: "CompressAI: a research library and evaluation platform for end-to-end compression" -- authors: - - Philip Meier - - Volker Lohweg - categories: - - Vision - description: - "The seminal work of Gatys, Ecker, and Bethge gave birth to the field\ - \ of _Neural Style Transfer_ (NST) in 2016. An NST describes the merger between\ - \ the content and artistic style of two arbitrary images. This idea is nothing\ - \ new in the field of Non-photorealistic rendering (NPR). What distinguishes NST\ - \ from traditional NPR approaches is its generality: an NST only needs a single\ - \ arbitrary content and style image as input and thus \"makes -- for the first\ - \ time -- a generalized style transfer practicable\". Besides peripheral tasks,\ - \ an NST at its core is the definition of an optimization criterion called _perceptual\ - \ loss_, which estimates the perceptual quality of the stylized image. Usually\ - \ the perceptual loss comprises a deep neural network that needs to supply encodings\ - \ of images from various depths. \n\n`pystiche` is a library for NST written in\ - \ Python and built upon PyTorch. It provides modular and efficient implementations\ - \ for commonly used perceptual losses as well as neural net architectures. This\ - \ enables users to mix current state-of-the-art techniques with new ideas with\ - \ ease. This poster will showcase the core concepts of `pystiche` that will enable\ - \ other researchers as well as lay persons to get an NST running in minutes." - link: https://github.com/pmeier/pystiche - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/D7.png - section: D7 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-D7.png - title: "pystiche: A Framework for Neural Style Transfer" -- authors: - - Siddhish Thakur - categories: - - Vision - description: - " Deep Learning (DL) has greatly highlighted the potential impact of - optimized machine learning in both the scientific - - and clinical communities. The advent of open-source DL libraries from major industrial - entities, such as TensorFlow - - (Google), PyTorch (Facebook), further contributes to DL promises on the democratization - of computational analytics.
However, increased technical and specialized background - is required to develop DL algorithms, and the variability of implementation details - hinders their reproducibility. Towards lowering the barrier and making the mechanism - of DL development, training, and inference more stable, reproducible, and scalable, - without requiring an extensive technical background, this manuscript proposes - the Generally Nuanced Deep Learning Framework (GaNDLF). With built-in support - for k-fold cross-validation, data augmentation, multiple modalities and output - classes, and multi-GPU training, as well as the ability to work with both radiographic - and histologic imaging, GaNDLF aims to provide an end-to-end solution for all - DL-related tasks, to tackle problems in medical imaging and provide a robust application - framework for deployment in clinical workflows. - - - Keywords: Deep Learning, Framework, Segmentation, Regression, Classification, - Cross-validation, Data - - augmentation, Deployment, Clinical, Workflows" - link: "" - poster_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/D8.png - section: D8 - thumbnail_link: https://s3.amazonaws.com/assets.pytorch.org/pted2021/posters/thumb-D8.png - title: - " GaNDLF \u2013 A Generally Nuanced Deep Learning Framework for Clinical\ - \ Imaging Workflows" diff --git a/_devel/formatter.py b/_devel/formatter.py deleted file mode 100644 index 9624af4a9e67..000000000000 --- a/_devel/formatter.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -Usage: cat pytorch_vision_vgg.md | python formatter.py | notedown >pytorch_vision_vgg.ipynb -""" -import sys -import yaml - -header = [] -markdown = [] -header_read = False -with open('/dev/stdin', 'r') as input, open('/dev/stdout', 'w') as output: - for line in input: - if line.startswith('---'): - header_read = not header_read - continue - if header_read == True: - header += [line] - else: - markdown += [line] - - header = yaml.load(''.join(header), Loader=yaml.BaseLoader) - if header is None: - # This assumes the markdown document has a yaml header - # but some documents, like the README.md do not - # Don't bother rendering them - exit() - - images = [] - try: - if header['featured_image_1'] != 'no-image': - images.append(header['featured_image_1']) - if header['featured_image_2'] != 'no-image': - images.append(header['featured_image_2']) - except: - pass - - pre = [] - - if 'accelerator' in header.keys(): - acc = header['accelerator'] - if acc == 'cuda': - note = ['### This notebook requires a GPU runtime to run.\n', - '### Please select the menu option "Runtime" -> "Change runtime type", select "Hardware Accelerator" -> "GPU" and click "SAVE"\n\n', - '----------------------------------------------------------------------\n\n'] - pre += note - elif acc == 'cuda-optional': - note = ['### This notebook is optionally accelerated with a GPU runtime.\n', - '### If you would like to use this acceleration, please select the menu option "Runtime" -> "Change runtime type", select "Hardware Accelerator" -> "GPU" and click "SAVE"\n\n', - '----------------------------------------------------------------------\n\n'] - pre += note - - pre += ['# ' + header['title'] + '\n\n'] - pre += ['*Author: ' + header['author'] + '*' + '\n\n'] - pre += ['**' + header['summary'] + '**' + '\n\n'] - - if len(images) == 2: - pre += ['_ | _\n'] - pre += ['- | -\n'] - pre += ['![alt](https://pytorch.org/assets/images/{}) | ' - '![alt](https://pytorch.org/assets/images/{})\n\n'.format(*images)] - elif len(images) == 1: - pre += 
['![alt](https://pytorch.org/assets/images/{})\n\n'.format(*images)] - - markdown = pre + markdown - output.write(''.join(markdown)) diff --git a/_devel/update_hub_submodule.sh b/_devel/update_hub_submodule.sh deleted file mode 100755 index 8e35a6b6f02d..000000000000 --- a/_devel/update_hub_submodule.sh +++ /dev/null @@ -1,16 +0,0 @@ -set -ex -pushd _hub -git pull https://github.com/pytorch/hub -popd -cp _hub/images/* assets/images/ - -python3 -c 'import notedown' || pip3 install notedown -python3 -c 'import yaml' || pip3 install pyyaml -mkdir -p assets/hub/ - -pushd _hub -find . -maxdepth 1 -name "*.md" | grep -v "README" | cut -f2- -d"/" | - while read file; do - cat "$file" | python3 ../_devel/formatter.py | notedown >"../assets/hub/${file%.md}.ipynb"; - done -popd diff --git a/_ecosystem/Captum b/_ecosystem/Captum deleted file mode 100644 index 2da478fe4963..000000000000 --- a/_ecosystem/Captum +++ /dev/null @@ -1,12 +0,0 @@ ---- -layout: ecosystem_detail -title: Captum -summary: Captum (“comprehension” in Latin) is an open source, extensible library for model interpretability built on PyTorch. -link: https://captum.ai/ -order: 4 -summary-home: Captum (“comprehension” in Latin) is an open source, extensible library for model interpretability built on PyTorch. -featured-home: true -redirect_to: https://captum.ai/ -github-id: pytorch/captum -date-added: 10/18/19 ---- diff --git a/_ecosystem/Flair b/_ecosystem/Flair deleted file mode 100644 index 4d76513c939a..000000000000 --- a/_ecosystem/Flair +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Flair -summary: Flair is a very simple framework for state-of-the-art natural language processing (NLP). -link: https://github.com/flairNLP/flair -order: 6 -redirect_to: https://github.com/flairNLP/flair -github-id: flairNLP/flair -date-added: 12/30/19 ---- diff --git a/_ecosystem/Forte b/_ecosystem/Forte deleted file mode 100644 index 040bd5bb4a65..000000000000 --- a/_ecosystem/Forte +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: forte -summary: Forte is a toolkit for building NLP pipelines featuring composable components, convenient data interfaces, and cross-task interaction. -link: https://github.com/asyml/forte -summary-home: Forte is a toolkit for building NLP pipelines featuring composable components, convenient data interfaces, and cross-task interaction. -featured-home: false -github-id: asyml/forte -date-added: 07/19/21 ---- diff --git a/_ecosystem/Ignite b/_ecosystem/Ignite deleted file mode 100644 index faf5f4792ef8..000000000000 --- a/_ecosystem/Ignite +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Ignite -summary: Ignite is a high-level library for training neural networks in PyTorch. It helps with writing compact, but full-featured training loops. -link: https://github.com/pytorch/ignite -order: 10 -redirect_to: https://github.com/pytorch/ignite -github-id: pytorch/ignite -date-added: 7/14/19 ---- diff --git a/_ecosystem/OpenMMLab b/_ecosystem/OpenMMLab deleted file mode 100644 index 8f3a1f047e65..000000000000 --- a/_ecosystem/OpenMMLab +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: OpenMMLab -summary: OpenMMLab covers a wide range of computer vision research topics including classification, detection, segmentation, and super-resolution. -link: https://github.com/open-mmlab -summary-home: OpenMMLab covers a wide range of computer vision research topics including classification, detection, segmentation, and super-resolution.
-featured-home: false -github-id: open-mmlab -date-added: 06/27/21 ---- diff --git a/_ecosystem/accelerate b/_ecosystem/accelerate deleted file mode 100644 index ab0316743384..000000000000 --- a/_ecosystem/accelerate +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: accelerate -summary: 🚀 A simple way to train and use PyTorch models with multi-GPU, TPU, mixed-precision -link: https://huggingface.co/docs/accelerate -summary-home: 🚀 A simple way to train and use PyTorch models with multi-GPU, TPU, mixed-precision -featured-home: false -github-id: huggingface/accelerate -date-added: 09/13/21 ---- diff --git a/_ecosystem/adaptdl b/_ecosystem/adaptdl deleted file mode 100644 index e48192595d6f..000000000000 --- a/_ecosystem/adaptdl +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: AdaptDL -summary: AdaptDL is a resource-adaptive deep learning training and scheduling framework. -link: https://github.com/petuum/adaptdl -summary-home: AdaptDL is a resource-adaptive deep learning training and scheduling framework. -featured-home: false -github-id: petuum/adaptdl -date-added: 2/5/21 ---- \ No newline at end of file diff --git a/_ecosystem/advertorch.md b/_ecosystem/advertorch.md deleted file mode 100644 index fc35a8dacb2a..000000000000 --- a/_ecosystem/advertorch.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -layout: ecosystem_detail -title: AdverTorch -summary: A toolbox for adversarial robustness research. It contains modules for generating adversarial examples and defending against attacks. -link: https://github.com/BorealisAI/advertorch -order: 1 -summary-home: A toolbox for adversarial robustness research. It contains modules for generating adversarial examples and defending against attacks. -featured-home: false -redirect_to: https://github.com/BorealisAI/advertorch -github-id: BorealisAI/advertorch -date-added: 6/14/19 ---- - diff --git a/_ecosystem/albumentations b/_ecosystem/albumentations deleted file mode 100644 index a91565536538..000000000000 --- a/_ecosystem/albumentations +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Albumentations -summary: Fast and extensible image augmentation library for different CV tasks like classification, segmentation, object detection and pose estimation. -link: https://github.com/albu/albumentations -summary-home: Fast and extensible image augmentation library for different CV tasks like classification, segmentation, object detection and pose estimation. -featured-home: false -github-id: albumentations-team/albumentations -date-added: 10/28/19 ---- diff --git a/_ecosystem/allennlp.md b/_ecosystem/allennlp.md deleted file mode 100644 index 37f1fab01642..000000000000 --- a/_ecosystem/allennlp.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -layout: ecosystem_detail -title: AllenNLP -summary: AllenNLP is an open-source research library built on PyTorch for designing and evaluating deep learning models for NLP. -link: https://allennlp.org/ -order: 2 -summary-home: AllenNLP is an open-source research library built on PyTorch for designing and evaluating deep learning models for NLP. 
-featured-home: false -redirect_to: https://allennlp.org/ -github-id: allenai/allennlp -date-added: 6/14/19 ---- - diff --git a/_ecosystem/avalanche b/_ecosystem/avalanche deleted file mode 100644 index 76e0fe500add..000000000000 --- a/_ecosystem/avalanche +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: avalanche -summary: "Avalanche: an End-to-End Library for Continual Learning" -link: http://avalanche.continualai.org -summary-home: "Avalanche: an End-to-End Library for Continual Learning" -featured-home: false -github-id: ContinualAI/avalanche -date-added: 02/23/22 ---- diff --git a/_ecosystem/baal b/_ecosystem/baal deleted file mode 100644 index c10b4b841d4c..000000000000 --- a/_ecosystem/baal +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: baal -summary: baal (bayesian active learning) aims to implement active learning using metrics of uncertainty derived from approximations of bayesian posteriors in neural networks. -link: https://baal.readthedocs.io/en/latest/ -summary-home: baal (bayesian active learning) aims to implement active learning using metrics of uncertainty derived from approximations of bayesian posteriors in neural networks. -featured-home: false -github-id: ElementAI/baal -date-added: 3/19/20 ---- diff --git a/_ecosystem/botorch b/_ecosystem/botorch deleted file mode 100644 index 2f83838be2d8..000000000000 --- a/_ecosystem/botorch +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: BoTorch -summary: BoTorch is a library for Bayesian Optimization. It provides a modular, extensible interface for composing Bayesian optimization primitives. -link: https://botorch.org/ -order: 3 -redirect_to: https://botorch.org/ -github-id: pytorch/botorch -date-added: 6/14/19 ---- diff --git a/_ecosystem/catalyst b/_ecosystem/catalyst deleted file mode 100644 index 10ad675939f4..000000000000 --- a/_ecosystem/catalyst +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Catalyst -summary: Catalyst helps you write compact, but full-featured deep learning and reinforcement learning pipelines with a few lines of code. -link: https://github.com/catalyst-team/catalyst -summary-home: Catalyst helps you write compact, but full-featured deep learning and reinforcement learning pipelines with a few lines of code. 
-featured-home: false -github-id: catalyst-team/catalyst -date-added: 10/28/19 ---- diff --git a/_ecosystem/clinicadl b/_ecosystem/clinicadl deleted file mode 100644 index 8b0707e58d49..000000000000 --- a/_ecosystem/clinicadl +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: ClinicaDL -summary: Framework for reproducible classification of Alzheimer's Disease -link: https://clinicadl.readthedocs.io/ -summary-home: Framework for reproducible classification of Alzheimer's Disease -featured-home: false -github-id: aramis-lab/AD-DL -date-added: 05/07/21 ---- diff --git a/_ecosystem/colossal b/_ecosystem/colossal deleted file mode 100644 index a3425c26d1fb..000000000000 --- a/_ecosystem/colossal +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: ColossalAI -summary: Colossal-AI is a Unified Deep Learning System for Big Model Era -link: https://www.colossalai.org/ -summary-home: Colossal-AI is a Unified Deep Learning System for Big Model Era -featured-home: false -github-id: hpcaitech/ColossalAI -date-added: 01/04/23 ---- diff --git a/_ecosystem/colossal-llama-2 b/_ecosystem/colossal-llama-2 deleted file mode 100644 index ab2751f4292b..000000000000 --- a/_ecosystem/colossal-llama-2 +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Colossal-LLaMA-2 -summary: A complete and open-sourced solution for injecting domain-specific knowledge into pre-trained LLM. -link: https://github.com/hpcaitech/ColossalAI/tree/main/applications/Colossal-LLaMA-2 -summary-home: A complete and open-sourced solution for injecting domain-specific knowledge into pre-trained LLM. -featured-home: false -github-id: hpcaitech/ColossalAI -date-added: 1/24/24 ---- diff --git a/_ecosystem/composer b/_ecosystem/composer deleted file mode 100644 index d61e18974197..000000000000 --- a/_ecosystem/composer +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: composer -summary: library of algorithms to speed up neural network training -link: https://github.com/mosaicml/composer -summary-home: library of algorithms to speed up neural network training -featured-home: false -github-id: mosaicml/composer -date-added: 03/28/22 ---- diff --git a/_ecosystem/crypten b/_ecosystem/crypten deleted file mode 100644 index c3a5420cfcaa..000000000000 --- a/_ecosystem/crypten +++ /dev/null @@ -1,12 +0,0 @@ ---- -layout: ecosystem_detail -title: CrypTen -summary: CrypTen is a framework for Privacy Preserving ML. Its goal is to make secure computing techniques accessible to ML practitioners. -link: https://github.com/facebookresearch/CrypTen -order: 5 -summary-home: CrypTen is a framework for Privacy Preserving ML. Its goal is to make secure computing techniques accessible to ML practitioners. -featured-home: false -redirect_to: https://github.com/facebookresearch/CrypTen -github-id: facebookresearch/CrypTen -date-added: 10/18/19 ---- diff --git a/_ecosystem/deepspeed b/_ecosystem/deepspeed deleted file mode 100644 index a2f81619f877..000000000000 --- a/_ecosystem/deepspeed +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: DeepSpeed -summary: DeepSpeed is a deep learning optimization library that makes distributed training easy, efficient, and effective. -link: https://www.deepspeed.ai/ -summary-home: DeepSpeed is a deep learning optimization library that makes distributed training easy, efficient, and effective. 
-featured-home: false -github-id: microsoft/DeepSpeed -date-added: 11/13/20 ---- diff --git a/_ecosystem/depyf b/_ecosystem/depyf deleted file mode 100644 index f8a9a7d2543c..000000000000 --- a/_ecosystem/depyf +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: depyf -summary: depyf is a tool to help users understand and adapt to PyTorch compiler torch.compile. -link: https://github.com/thuml/depyf -summary-home: depyf is a tool to help users understand and adapt to PyTorch compiler torch.compile. -featured-home: false -github-id: thuml/depyf -date-added: 1/24/24 ---- diff --git a/_ecosystem/detectron2 b/_ecosystem/detectron2 deleted file mode 100644 index 2cf4899ae13e..000000000000 --- a/_ecosystem/detectron2 +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Detectron2 -summary: Detectron2 is FAIR's next-generation platform for object detection and segmentation. -link: https://github.com/facebookresearch/detectron2 -summary-home: Detectron2 is FAIR's next-generation platform for object detection and segmentation. -featured-home: false -github-id: facebookresearch/detectron2 -date-added: 3/27/20 ---- diff --git a/_ecosystem/determined b/_ecosystem/determined deleted file mode 100644 index 67e7e8b467c1..000000000000 --- a/_ecosystem/determined +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Determined -summary: Determined is a platform that helps deep learning teams train models more quickly, easily share GPU resources, and effectively collaborate. -link: https://github.com/determined-ai/determined -summary-home: Determined is a platform that helps deep learning teams train models more quickly, easily share GPU resources, and effectively collaborate. -featured-home: false -github-id: determined-ai/determined -date-added: 9/8/20 ---- diff --git a/_ecosystem/dgl b/_ecosystem/dgl deleted file mode 100644 index 902ba360312b..000000000000 --- a/_ecosystem/dgl +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: DGL -summary: Deep Graph Library (DGL) is a Python package built for easy implementation of graph neural network model family, on top of PyTorch and other frameworks. -link: https://www.dgl.ai -summary-home: Deep Graph Library (DGL) is a Python package built for easy implementation of graph neural network model family, on top of PyTorch and other frameworks. -featured-home: false -github-id: dmlc/dgl -date-added: 3/3/20 ---- diff --git a/_ecosystem/diffusers b/_ecosystem/diffusers deleted file mode 100644 index 882922f60ed9..000000000000 --- a/_ecosystem/diffusers +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Diffusers -summary: Diffusers provides pretrained diffusion models across multiple modalities, such as vision and audio, and serves as a modular toolbox for inference and training of diffusion models. -link: https://huggingface.co/docs/diffusers -summary-home: Diffusers provides pretrained diffusion models across multiple modalities, such as vision and audio, and serves as a modular toolbox for inference and training of diffusion models. -featured-home: false -github-id: huggingface/diffusers -date-added: 6/1/23 ---- diff --git a/_ecosystem/doctr b/_ecosystem/doctr deleted file mode 100644 index 91ed5d688557..000000000000 --- a/_ecosystem/doctr +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: docTR -summary: docTR (Document Text Recognition) - a seamless, high-performing & accessible library for OCR-related tasks powered by Deep Learning. 
-link: https://github.com/mindee/doctr -summary-home: docTR (Document Text Recognition) - a seamless, high-performing & accessible library for OCR-related tasks powered by Deep Learning. -featured-home: false -github-id: mindee/doctr -date-added: 12/3/24 ---- diff --git a/_ecosystem/einops b/_ecosystem/einops deleted file mode 100644 index 62d776040b6e..000000000000 --- a/_ecosystem/einops +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: einops -summary: Flexible and powerful tensor operations for readable and reliable code. -link: https://github.com/arogozhnikov/einops -summary-home: Flexible and powerful tensor operations for readable and reliable code. -featured-home: false -github-id: arogozhnikov/einops -date-added: 1/20/21 ---- diff --git a/_ecosystem/ensemble-pytorch b/_ecosystem/ensemble-pytorch deleted file mode 100644 index 1ff8367ba575..000000000000 --- a/_ecosystem/ensemble-pytorch +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Ensemble-Pytorch -summary: A unified ensemble framework for PyTorch to improve the performance and robustness of your deep learning model. -link: https://ensemble-pytorch.readthedocs.io -summary-home: A unified ensemble framework for PyTorch to improve the performance and robustness of your deep learning model. -featured-home: false -github-id: TorchEnsemble-Community/Ensemble-Pytorch -date-added: 06/02/21 ---- diff --git a/_ecosystem/fairscale b/_ecosystem/fairscale deleted file mode 100644 index 87d8e468721b..000000000000 --- a/_ecosystem/fairscale +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: FairScale -summary: FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. -link: https://github.com/facebookresearch/fairscale -summary-home: FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. -featured-home: false -github-id: facebookresearch/fairscale -date-added: 1/22/21 ---- diff --git a/_ecosystem/fastai b/_ecosystem/fastai deleted file mode 100644 index 0ecaaf9017ba..000000000000 --- a/_ecosystem/fastai +++ /dev/null @@ -1,13 +0,0 @@ ---- -layout: ecosystem_detail -title: fastai -summary: fastai is a library that simplifies training fast and accurate neural nets using modern best practices. -link: https://docs.fast.ai -order: 5 -summary-home: fastai is a library that simplifies training fast and accurate neural nets using modern best practices. 
-featured-home: false -redirect_to: https://docs.fast.ai -github-id: fastai/fastai -date-added: 7/14/19 ---- - diff --git a/_ecosystem/flower b/_ecosystem/flower deleted file mode 100644 index 8ef1bdbcad1e..000000000000 --- a/_ecosystem/flower +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Flower -summary: Flower - A Friendly Federated Learning Framework -link: https://flower.dev -summary-home: Flower - A Friendly Federated Learning Framework -featured-home: false -github-id: adap/flower -date-added: 01/05/22 ---- diff --git a/_ecosystem/fusemedml b/_ecosystem/fusemedml deleted file mode 100644 index ab588de504b0..000000000000 --- a/_ecosystem/fusemedml +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: FuseMedML -summary: FuseMedML is a python framework accelerating ML based discovery in the medical field by encouraging code reuse -link: https://github.com/BiomedSciAI/fuse-med-ml -summary-home: FuseMedML is a python framework accelerating ML based discovery in the medical field by encouraging code reuse -featured-home: false -github-id: BiomedSciAI/fuse-med-ml -date-added: 02/16/23 ---- diff --git a/_ecosystem/gandlf b/_ecosystem/gandlf deleted file mode 100644 index 39acd653939a..000000000000 --- a/_ecosystem/gandlf +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: GaNDLF -summary: A generalizable application framework for segmentation, regression, and classification using PyTorch -link: https://mlcommons.github.io/GaNDLF/ -summary-home: A generalizable application framework for segmentation, regression, and classification using PyTorch -featured-home: false -github-id: mlcommons/GaNDLF -date-added: 05/07/21 ---- diff --git a/_ecosystem/glow.md b/_ecosystem/glow.md deleted file mode 100644 index 56503644e0ae..000000000000 --- a/_ecosystem/glow.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -layout: ecosystem_detail -title: Glow -summary: Glow is a ML compiler that accelerates the performance of deep learning frameworks on different hardware platforms. -link: https://github.com/pytorch/glow -order: 7 -summary-home: Glow is a ML compiler that accelerates the performance of deep learning frameworks on different hardware platforms. -featured-home: false -logo-class: tool -redirect_to: https://github.com/pytorch/glow -github-id: pytorch/glow -date-added: 3/27/20 ---- diff --git a/_ecosystem/gpytorch.md b/_ecosystem/gpytorch.md deleted file mode 100644 index 1082637bfe51..000000000000 --- a/_ecosystem/gpytorch.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: GPyTorch -summary: GPyTorch is a Gaussian process library implemented using PyTorch, designed for creating scalable, flexible Gaussian process models. -link: https://cornellius-gp.github.io/ -order: 8 -redirect_to: https://cornellius-gp.github.io/ -github-id: cornellius-gp/gpytorch -date-added: 7/14/19 ---- diff --git a/_ecosystem/higher b/_ecosystem/higher deleted file mode 100644 index c9129977d96a..000000000000 --- a/_ecosystem/higher +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: higher -summary: higher is a library which facilitates the implementation of arbitrarily complex gradient-based meta-learning algorithms and nested optimisation loops with near-vanilla PyTorch. -link: https://github.com/facebookresearch/higher -summary-home: higher is a library which facilitates the implementation of arbitrarily complex gradient-based meta-learning algorithms and nested optimisation loops with near-vanilla PyTorch. 
-featured-home: false -github-id: facebookresearch/higher -date-added: 5/21/20 ---- diff --git a/_ecosystem/horovod b/_ecosystem/horovod deleted file mode 100644 index 763062c8c12c..000000000000 --- a/_ecosystem/horovod +++ /dev/null @@ -1,13 +0,0 @@ ---- -layout: ecosystem_detail -title: Horovod -summary: Horovod is a distributed training library for deep learning frameworks. Horovod aims to make distributed DL fast and easy to use. -link: http://horovod.ai -order: 9 -summary-home: Horovod is a distributed training library for deep learning frameworks. Horovod aims to make distributed DL fast and easy to use. -featured-home: false -redirect_to: http://horovod.ai -github-id: horovod/horovod -date-added: 7/14/19 ---- - diff --git a/_ecosystem/hummingbird b/_ecosystem/hummingbird deleted file mode 100644 index c68544d17378..000000000000 --- a/_ecosystem/hummingbird +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Hummingbird -summary: Hummingbird compiles trained ML models into tensor computation for faster inference. -link: https://github.com/microsoft/hummingbird -summary-home: Hummingbird compiles trained ML models into tensor computation for faster inference. -featured-home: false -github-id: microsoft/hummingbird -date-added: 6/17/20 ---- diff --git a/_ecosystem/hydra b/_ecosystem/hydra deleted file mode 100644 index e130e2d98c48..000000000000 --- a/_ecosystem/hydra +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Hydra -summary: A framework for elegantly configuring complex applications. -link: https://hydra.cc/ -summary-home: A framework for elegantly configuring complex applications. -featured-home: false -github-id: facebookresearch/hydra -date-added: 1/6/20 ---- diff --git a/_ecosystem/inc b/_ecosystem/inc deleted file mode 100644 index be46670b1ea7..000000000000 --- a/_ecosystem/inc +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: neural-compressor -summary: Intel® Neural Compressor provides unified APIs for network compression technologies for faster inference -link: https://intel.github.io/neural-compressor/ -summary-home: Intel® Neural Compressor provides unified APIs for network compression technologies for faster inference -featured-home: false -github-id: intel/neural-compressor -date-added: 03/28/22 ---- diff --git a/_ecosystem/ipex b/_ecosystem/ipex deleted file mode 100644 index 9b04cd5faa37..000000000000 --- a/_ecosystem/ipex +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: intel-extension-for-pytorch -summary: A Python package for improving PyTorch performance on Intel platforms -link: https://intel.github.io/intel-extension-for-pytorch/ -summary-home: A Python package for improving PyTorch performance on Intel platforms -featured-home: false -github-id: intel/intel-extension-for-pytorch -date-added: 02/16/22 ---- diff --git a/_ecosystem/ivy b/_ecosystem/ivy deleted file mode 100644 index bd51388a497c..000000000000 --- a/_ecosystem/ivy +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: ivy -summary: The Unified Machine Learning Framework -link: https://lets-unify.ai -summary-home: The Unified Machine Learning Framework -featured-home: false -github-id: unifyai/ivy -date-added: 02/23/22 ---- diff --git a/_ecosystem/joeynmt b/_ecosystem/joeynmt deleted file mode 100644 index 1758bdda543b..000000000000 --- a/_ecosystem/joeynmt +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: joeynmt -summary: Minimalist Neural Machine Translation toolkit for
educational purposes -link: https://joeynmt.readthedocs.io/en/latest/ -summary-home: Minimalist Neural Machine Translation toolkit for educational purposes -featured-home: false -github-id: joeynmt/joeynmt -date-added: 05/07/21 ---- diff --git a/_ecosystem/kornia b/_ecosystem/kornia deleted file mode 100644 index 18bc91095281..000000000000 --- a/_ecosystem/kornia +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Kornia -summary: Kornia is a differentiable computer vision library that consists of a set of routines and differentiable modules to solve generic CV problems. -link: https://kornia.github.io/ -summary-home: Kornia is a differentiable computer vision library that consists of a set of routines and differentiable modules to solve generic CV problems. -featured-home: false -github-id: kornia/kornia -date-added: 10/29/19 ---- diff --git a/_ecosystem/l5kit b/_ecosystem/l5kit deleted file mode 100644 index f76ac00d8739..000000000000 --- a/_ecosystem/l5kit +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: L5Kit -summary: ML Prediction, Planning and Simulation for Self-Driving built on PyTorch. -link: https://github.com/lyft/l5kit -summary-home: ML Prediction, Planning and Simulation for Self-Driving built on PyTorch. -featured-home: false -github-id: lyft/l5kit -date-added: 1/10/21 ---- diff --git a/_ecosystem/lightly b/_ecosystem/lightly deleted file mode 100644 index 6bb024568c19..000000000000 --- a/_ecosystem/lightly +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Lightly -summary: Lightly is a computer vision framework for self-supervised learning. -link: https://github.com/lightly-ai/lightly -summary-home: Lightly is a computer vision framework for self-supervised learning. -featured-home: false -github-id: lightly-ai/lightly -date-added: 08/23/21 ---- diff --git a/_ecosystem/ludwig b/_ecosystem/ludwig deleted file mode 100644 index db30bab6e0b5..000000000000 --- a/_ecosystem/ludwig +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: ludwig -summary: Data-centric declarative deep learning framework -link: http://ludwig.ai -summary-home: Data-centric declarative deep learning framework -featured-home: false -github-id: ludwig-ai/ludwig -date-added: 05/20/22 ---- diff --git a/_ecosystem/mmf b/_ecosystem/mmf deleted file mode 100644 index f24d9a683f6b..000000000000 --- a/_ecosystem/mmf +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: MMF -summary: A modular framework for vision & language multimodal research from Facebook AI Research (FAIR). -link: https://mmf.sh/ -summary-home: A modular framework for vision & language multimodal research from Facebook AI Research (FAIR). -featured-home: false -github-id: facebookresearch/mmf -date-added: 6/11/20 ---- diff --git a/_ecosystem/monai b/_ecosystem/monai deleted file mode 100644 index 0228b06bc3ad..000000000000 --- a/_ecosystem/monai +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: MONAI -summary: MONAI provides domain-optimized foundational capabilities for developing healthcare imaging training workflows. -link: https://monai.io -summary-home: MONAI provides domain-optimized foundational capabilities for developing healthcare imaging training workflows. 
-featured-home: false -github-id: Project-MONAI/MONAI -date-added: 5/1/20 ---- diff --git a/_ecosystem/nemo b/_ecosystem/nemo deleted file mode 100644 index 3cfb69d31c09..000000000000 --- a/_ecosystem/nemo +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: NeMo -summary: "NeMo: a toolkit for conversational AI." -link: https://github.com/NVIDIA/NeMo -summary-home: "NeMo: a toolkit for conversational AI" -featured-home: false -github-id: NVIDIA/NeMo -date-added: 6/16/20 ---- diff --git a/_ecosystem/octoml b/_ecosystem/octoml deleted file mode 100644 index 43ce44ca1589..000000000000 --- a/_ecosystem/octoml +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: OctoML Profile -summary: octoml-profile is a python library and cloud service designed to provide a simple experience for assessing and optimizing the performance of PyTorch models. -link: https://github.com/octoml/octoml-profile -summary-home: octoml-profile is a python library and cloud service designed to provide a simple experience for assessing and optimizing the performance of PyTorch models. -featured-home: false -github-id: octoml/octoml-profile -date-added: 6/1/23 ---- diff --git a/_ecosystem/onnxrt b/_ecosystem/onnxrt deleted file mode 100644 index f2e6e688442e..000000000000 --- a/_ecosystem/onnxrt +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: ONNX Runtime -summary: ONNX Runtime is a cross-platform inferencing and training accelerator. -link: https://github.com/microsoft/onnxruntime -summary-home: ONNX Runtime is a cross-platform inferencing and training accelerator. -featured-home: false -github-id: microsoft/onnxruntime -date-added: 2/1/21 ---- diff --git a/_ecosystem/opacus b/_ecosystem/opacus deleted file mode 100644 index 91671d612731..000000000000 --- a/_ecosystem/opacus +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Opacus -summary: Train PyTorch models with Differential Privacy -link: https://opacus.ai/ -summary-home: Train PyTorch models with Differential Privacy -featured-home: false -github-id: pytorch/opacus -date-added: 10/29/20 ---- diff --git a/_ecosystem/opencompass b/_ecosystem/opencompass deleted file mode 100644 index a55a4ef31f61..000000000000 --- a/_ecosystem/opencompass +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: OpenCompass -summary: OpenCompass is an LLM evaluation platform, supporting a wide range of models (Llama3, Mistral, InternLM2,GPT-4,LLaMa2, Qwen,GLM, Claude, etc) over 100+ datasets. -link: https://github.com/open-compass/opencompass -summary-home: OpenCompass is an LLM evaluation platform, supporting a wide range of models (Llama3, Mistral, InternLM2,GPT-4,LLaMa2, Qwen,GLM, Claude, etc) over 100+ datasets. -featured-home: false -github-id: open-compass/opencompass -date-added: 12/18/24 ---- diff --git a/_ecosystem/optuna b/_ecosystem/optuna deleted file mode 100644 index beff1d69bcd5..000000000000 --- a/_ecosystem/optuna +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Optuna -summary: An open source hyperparameter optimization framework to automate hyperparameter search. -link: https://optuna.org/ -summary-home: An open source hyperparameter optimization framework to automate hyperparameter search. 
-featured-home: false -github-id: optuna/optuna -date-added: 4/6/20 ---- diff --git a/_ecosystem/padl b/_ecosystem/padl deleted file mode 100644 index ac3f5818a67b..000000000000 --- a/_ecosystem/padl +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: padl -summary: Pipeline Abstractions for Deep Learning in PyTorch -link: https://lf1-io.github.io/padl/ -summary-home: Pipeline Abstractions for Deep Learning in PyTorch -featured-home: false -github-id: lf1-io/padl -date-added: 03/28/22 ---- diff --git a/_ecosystem/parlai b/_ecosystem/parlai deleted file mode 100644 index cac2924c3ad8..000000000000 --- a/_ecosystem/parlai +++ /dev/null @@ -1,12 +0,0 @@ ---- -layout: ecosystem_detail -title: ParlAI -summary: ParlAI is a unified platform for sharing, training, and evaluating dialog models across many tasks. -link: http://parl.ai/ -order: 11 -summary-home: ParlAI is a unified platform for sharing, training, and evaluating dialog models across many tasks. -featured-home: false -redirect_to: http://parl.ai/ -github-id: facebookresearch/ParlAI -date-added: 7/14/19 ---- diff --git a/_ecosystem/pennylane b/_ecosystem/pennylane deleted file mode 100644 index eb4ba2e24643..000000000000 --- a/_ecosystem/pennylane +++ /dev/null @@ -1,12 +0,0 @@ ---- -layout: ecosystem_detail -title: PennyLane -summary: PennyLane is a library for quantum ML, automatic differentiation, and optimization of hybrid quantum-classical computations. -link: https://pennylane.ai/ -order: 12 -summary-home: PennyLane is a library for quantum ML, automatic differentiation, and optimization of hybrid quantum-classical computations. -featured-home: false -redirect_to: https://pennylane.ai/ -github-id: PennyLaneAI/pennylane -date-added: 7/14/19 ---- diff --git a/_ecosystem/pfrl b/_ecosystem/pfrl deleted file mode 100644 index 8b33e14229b9..000000000000 --- a/_ecosystem/pfrl +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: PFRL -summary: PFRL is a deep reinforcement learning library that implements various state-of-the-art deep reinforcement algorithms in Python using PyTorch. -link: https://github.com/pfnet/pfrl -summary-home: PFRL is a deep reinforcement learning library that implements various state-of-the-art deep reinforcement algorithms in Python using PyTorch. -featured-home: false -github-id: pfnet/pfrl -date-added: 8/6/20 ---- diff --git a/_ecosystem/polyaxon b/_ecosystem/polyaxon deleted file mode 100644 index 54757d85205d..000000000000 --- a/_ecosystem/polyaxon +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Polyaxon -summary: Polyaxon is a platform for building, training, and monitoring large-scale deep learning applications. -link: https://github.com/polyaxon/polyaxon -summary-home: Polyaxon is a platform for building, training, and monitoring large-scale deep learning applications. -featured-home: false -github-id: polyaxon/polyaxon -date-added: 9/17/20 ---- diff --git a/_ecosystem/pomegranate b/_ecosystem/pomegranate deleted file mode 100644 index 984b55851cfc..000000000000 --- a/_ecosystem/pomegranate +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: pomegranate -summary: pomegranate is a library of probabilistic models that is built in a modular manner and treats all models as the probability distributions that they are. -link: https://pomegranate.readthedocs.io/en/latest/ -summary-home: pomegranate is a library of probabilistic models that is built in a modular manner and treats all models as the probability distributions that they are. 
-featured-home: false -github-id: jmschrei/pomegranate -date-added: 6/1/23 ---- diff --git a/_ecosystem/poptorch b/_ecosystem/poptorch deleted file mode 100644 index 15295d77db91..000000000000 --- a/_ecosystem/poptorch +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: PopTorch -summary: The PopTorch interface library is a simple wrapper for running PyTorch programs directly on Graphcore IPUs. -link: https://docs.graphcore.ai/projects/poptorch-user-guide/en/latest/ -summary-home: The PopTorch interface library is a simple wrapper for running PyTorch programs directly on Graphcore IPUs. -featured-home: false -github-id: graphcore/poptorch -date-added: 3/23/21 ---- diff --git a/_ecosystem/poutyne b/_ecosystem/poutyne deleted file mode 100644 index 9ac823ab7ccb..000000000000 --- a/_ecosystem/poutyne +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Poutyne -summary: Poutyne is a Keras-like framework for PyTorch and handles much of the boilerplating code needed to train neural networks. -link: https://poutyne.org/ -summary-home: Poutyne is a Keras-like framework for PyTorch and handles much of the boilerplating code needed to train neural networks. -featured-home: false -github-id: GRAAL-Research/poutyne -date-added: 2/13/20 ---- diff --git a/_ecosystem/pykale b/_ecosystem/pykale deleted file mode 100644 index dd8338c18d77..000000000000 --- a/_ecosystem/pykale +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: PyKale -summary: PyKale is a PyTorch library for multimodal learning and transfer learning with deep learning and dimensionality reduction on graphs, images, texts, and videos. -link: https://github.com/pykale/pykale -summary-home: PyKale is a PyTorch library for multimodal learning and transfer learning with deep learning and dimensionality reduction on graphs, images, texts, and videos. -featured-home: false -github-id: pykale/pykale -date-added: 09/09/21 ---- diff --git a/_ecosystem/pypose b/_ecosystem/pypose deleted file mode 100644 index d4af561ece42..000000000000 --- a/_ecosystem/pypose +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: PyPose -summary: PyPose is a robotics-oriented, PyTorch-based library that combines deep perceptual models with physics-based optimization techniques, so that users can focus on their novel applications. -link: https://pypose.org -summary-home: PyPose is a robotics-oriented, PyTorch-based library that combines deep perceptual models with physics-based optimization techniques, so that users can focus on their novel applications. -featured-home: false -github-id: pypose/pypose -date-added: 6/1/23 ---- diff --git a/_ecosystem/pypots b/_ecosystem/pypots deleted file mode 100644 index b728914b16c8..000000000000 --- a/_ecosystem/pypots +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: PyPOTS -summary: A Python toolbox for data mining on Partially-Observed Time Series (POTS) that helps engineers focus more on the core problems rather than missing parts in their data. -link: https://github.com/WenjieDu/PyPOTS -summary-home: A Python toolbox for data mining on Partially-Observed Time Series (POTS) that helps engineers focus more on the core problems rather than missing parts in their data.
-featured-home: false -github-id: WenjieDu/PyPOTS -date-added: 6/28/23 ---- diff --git a/_ecosystem/pyro.md b/_ecosystem/pyro.md deleted file mode 100644 index 2b5bf4b99329..000000000000 --- a/_ecosystem/pyro.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Pyro -summary: Pyro is a universal probabilistic programming language (PPL) written in Python and supported by PyTorch on the backend. -link: http://pyro.ai/ -order: 13 -redirect_to: http://pyro.ai/ -github-id: pyro-ppl/pyro -date-added: 7/14/19 ---- diff --git a/_ecosystem/pystiche b/_ecosystem/pystiche deleted file mode 100644 index 1474285b15ac..000000000000 --- a/_ecosystem/pystiche +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: pystiche -summary: pystiche is a framework for Neural Style Transfer (NST) built upon PyTorch. -link: https://github.com/pystiche/pystiche -summary-home: pystiche is a framework for Neural Style Transfer (NST) built upon PyTorch. -featured-home: false -github-id: pystiche/pystiche -date-added: 5/7/21 ---- diff --git a/_ecosystem/pysyft b/_ecosystem/pysyft deleted file mode 100644 index 08ca1b429ed0..000000000000 --- a/_ecosystem/pysyft +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: PySyft -summary: PySyft is a Python library for encrypted, privacy preserving deep learning. -link: https://github.com/OpenMined/PySyft -order: 14 -redirect_to: https://github.com/OpenMined/PySyft -github-id: OpenMined/PySyft -date-added: 7/14/19 ---- diff --git a/_ecosystem/pytorch-geometric b/_ecosystem/pytorch-geometric deleted file mode 100644 index 6dba5ab31a44..000000000000 --- a/_ecosystem/pytorch-geometric +++ /dev/null @@ -1,12 +0,0 @@ ---- -layout: ecosystem_detail -title: PyTorch Geometric -summary: PyTorch Geometric is a library for deep learning on irregular input data such as graphs, point clouds, and manifolds. -link: https://github.com/pyg-team/pytorch_geometric/ -order: 15 -summary-home: PyTorch Geometric is a library for deep learning on irregular input data such as graphs, point clouds, and manifolds. -featured-home: true -redirect_to: https://github.com/pyg-team/pytorch_geometric/ -github-id: pyg-team/pytorch_geometric -date-added: 7/14/19 ---- diff --git a/_ecosystem/pytorch-lightning b/_ecosystem/pytorch-lightning deleted file mode 100644 index c15dae5edfa4..000000000000 --- a/_ecosystem/pytorch-lightning +++ /dev/null @@ -1,12 +0,0 @@ ---- -layout: ecosystem_detail -title: PyTorch Lightning -summary: PyTorch Lightning is a Keras-like ML library for PyTorch. It leaves core training and validation logic to you and automates the rest. -link: https://github.com/williamFalcon/pytorch-lightning -order: 16 -summary-home: PyTorch Lightning is a Keras-like ML library for PyTorch. It leaves core training and validation logic to you and automates the rest. -featured-home: false -redirect_to: https://github.com/williamFalcon/pytorch-lightning -github-id: PyTorchLightning/pytorch-lightning -date-added: 8/14/19 ---- diff --git a/_ecosystem/pytorch-metric-learning b/_ecosystem/pytorch-metric-learning deleted file mode 100644 index 22007f4276d5..000000000000 --- a/_ecosystem/pytorch-metric-learning +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: PyTorch Metric Learning -summary: The easiest way to use deep metric learning in your application. Modular, flexible, and extensible. -link: https://github.com/KevinMusgrave/pytorch-metric-learning -summary-home: The easiest way to use deep metric learning in your application. 
Modular, flexible, and extensible. -featured-home: false -github-id: KevinMusgrave/pytorch-metric-learning -date-added: 1/20/21 ---- diff --git a/_ecosystem/pytorch-nlp b/_ecosystem/pytorch-nlp deleted file mode 100644 index c56435317f77..000000000000 --- a/_ecosystem/pytorch-nlp +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: PyTorch-NLP -summary: Basic Utilities for PyTorch Natural Language Processing (NLP). -link: https://pytorchnlp.readthedocs.io -summary-home: Basic Utilities for PyTorch Natural Language Processing (NLP). -featured-home: false -github-id: PetrochukM/PyTorch-NLP -date-added: 4/6/20 ---- diff --git a/_ecosystem/pytorch3d b/_ecosystem/pytorch3d deleted file mode 100644 index fbba2cbcb86f..000000000000 --- a/_ecosystem/pytorch3d +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: PyTorch3D -summary: PyTorch3D provides efficient, reusable components for 3D Computer Vision research with PyTorch. -link: https://pytorch3d.org/ -summary-home: PyTorch3D provides efficient, reusable components for 3D Computer Vision research with PyTorch. -featured-home: false -github-id: facebookresearch/pytorch3d -date-added: 3/27/20 ---- diff --git a/_ecosystem/pytorch_geometric_temporal b/_ecosystem/pytorch_geometric_temporal deleted file mode 100644 index 89c13df5b3d5..000000000000 --- a/_ecosystem/pytorch_geometric_temporal +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: PyTorch Geometric Temporal -summary: PyTorch Geometric Temporal is a temporal (dynamic) extension library for PyTorch Geometric. -link: https://github.com/benedekrozemberczki/pytorch_geometric_temporal -summary-home: PyTorch Geometric Temporal is a temporal (dynamic) extension library for PyTorch Geometric. -featured-home: false -github-id: benedekrozemberczki/pytorch_geometric_temporal -date-added: 4/11/21 ---- diff --git a/_ecosystem/pytorchfi b/_ecosystem/pytorchfi deleted file mode 100644 index afa174e5ade0..000000000000 --- a/_ecosystem/pytorchfi +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: pytorchfi -summary: A runtime fault injection tool for PyTorch. -link: https://github.com/pytorchfi/pytorchfi -summary-home: A runtime fault injection tool for PyTorch. -featured-home: false -github-id: pytorchfi/pytorchfi -date-added: 09/08/21 ---- diff --git a/_ecosystem/pytorchvideo b/_ecosystem/pytorchvideo deleted file mode 100644 index 15d778bdc257..000000000000 --- a/_ecosystem/pytorchvideo +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: PyTorchVideo -summary: A deep learning library for video understanding research. Hosts various video-focused models, datasets, training pipelines and more. -link: https://pytorchvideo.org/ -summary-home: A deep learning library for video understanding research. Hosts various video-focused models, datasets, training pipelines and more. -featured-home: false -github-id: facebookresearch/pytorchvideo -date-added: 08/15/21 ---- diff --git a/_ecosystem/rastervision b/_ecosystem/rastervision deleted file mode 100644 index 88ef1358de2e..000000000000 --- a/_ecosystem/rastervision +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: raster-vision -summary: An open source framework for deep learning on satellite and aerial imagery. -link: https://docs.rastervision.io -summary-home: An open source framework for deep learning on satellite and aerial imagery. 
-featured-home: false -github-id: azavea/raster-vision -date-added: 05/07/21 ---- diff --git a/_ecosystem/ray b/_ecosystem/ray deleted file mode 100644 index aab4600a0da4..000000000000 --- a/_ecosystem/ray +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Ray -summary: Ray is a fast and simple framework for building and running distributed applications. -link: https://github.com/ray-project/ray -summary-home: Ray is a fast and simple framework for building and running distributed applications. -featured-home: false -github-id: ray-project/ray -date-added: 8/20/20 ---- diff --git a/_ecosystem/renate b/_ecosystem/renate deleted file mode 100644 index 46308be32fae..000000000000 --- a/_ecosystem/renate +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Renate -summary: Renate is a library providing tools for re-training pytorch models over time as new data becomes available. -link: https://renate.readthedocs.io/en/latest/ -summary-home: Renate is a library providing tools for re-training pytorch models over time as new data becomes available. -featured-home: false -github-id: awslabs/renate -date-added: 6/1/23 ---- diff --git a/_ecosystem/roma b/_ecosystem/roma deleted file mode 100644 index c1af32c70cbf..000000000000 --- a/_ecosystem/roma +++ /dev/null @@ -1,9 +0,0 @@ ---- -layout: ecosystem_detail -title: RoMa -summary: RoMa is a standalone library to handle rotation representations with PyTorch (rotation matrices, quaternions, rotation vectors, etc). It aims for robustness, ease-of-use, and efficiency. -link: https://github.com/naver/roma -order: 10 -redirect_to: https://github.com/naver/roma -date-added: 9/21/23 ---- diff --git a/_ecosystem/simulai b/_ecosystem/simulai deleted file mode 100644 index 8076cfe9e122..000000000000 --- a/_ecosystem/simulai +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: SimulAI -summary: SimulAI is basically a toolkit with pipelines for physics-informed machine learning. -link: https://github.com/IBM/simulai -summary-home: SimulAI is basically a toolkit with pipelines for physics-informed machine learning. -featured-home: false -github-id: IBM/simulai -date-added: 1/24/24 ---- diff --git a/_ecosystem/skorch b/_ecosystem/skorch deleted file mode 100644 index 24746439398d..000000000000 --- a/_ecosystem/skorch +++ /dev/null @@ -1,12 +0,0 @@ ---- -layout: ecosystem_detail -title: skorch -summary: skorch is a high-level library for PyTorch that provides full scikit-learn compatibility. -link: https://github.com/skorch-dev/skorch -order: 17 -summary-home: skorch is a high-level library for PyTorch that provides full scikit-learn compatibility. -featured-home: true -redirect_to: https://github.com/skorch-dev/skorch -github-id: skorch-dev/skorch -date-added: 8/14/19 ---- diff --git a/_ecosystem/stable-baselines3 b/_ecosystem/stable-baselines3 deleted file mode 100644 index 81d3ebab042f..000000000000 --- a/_ecosystem/stable-baselines3 +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Stable Baselines3 -summary: Stable Baselines3 (SB3) is a set of reliable implementations of reinforcement learning algorithms in PyTorch. -link: https://github.com/DLR-RM/stable-baselines3 -summary-home: Stable Baselines3 (SB3) is a set of reliable implementations of reinforcement learning algorithms. 
-featured-home: false -github-id: DLR-RM/stable-baselines3 -date-added: 3/29/21 ---- diff --git a/_ecosystem/stoke b/_ecosystem/stoke deleted file mode 100644 index 773b1e32e5f1..000000000000 --- a/_ecosystem/stoke +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: stoke -summary: A lightweight declarative PyTorch wrapper for context switching between devices, distributed modes, mixed-precision, and PyTorch extensions. -link: https://fidelity.github.io/stoke/ -summary-home: A lightweight declarative PyTorch wrapper for context switching between devices, distributed modes, mixed-precision, and PyTorch extensions. -featured-home: false -github-id: fidelity/stoke -date-added: 09/08/21 ---- diff --git a/_ecosystem/substra b/_ecosystem/substra deleted file mode 100644 index 59e194c5976d..000000000000 --- a/_ecosystem/substra +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Substra -summary: Substra is a federated learning Python library to run federated learning experiments at scale on real distributed data. -link: https://github.com/Substra -summary-home: Substra is a federated learning Python library to run federated learning experiments at scale on real distributed data. -featured-home: false -github-id: substra -date-added: 6/28/23 ---- diff --git a/_ecosystem/tensorly.md b/_ecosystem/tensorly.md deleted file mode 100644 index 0e7f36ceeb34..000000000000 --- a/_ecosystem/tensorly.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: TensorLy -summary: TensorLy is a high level API for tensor methods and deep tensorized neural networks in Python that aims to make tensor learning simple. -link: http://tensorly.org/stable/home.html -order: 18 -redirect_to: http://tensorly.org/stable/home.html -github-id: tensorly/tensorly -date-added: 8/14/19 ---- diff --git a/_ecosystem/textbrewer b/_ecosystem/textbrewer deleted file mode 100644 index 88bad3356eea..000000000000 --- a/_ecosystem/textbrewer +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: TextBrewer -summary: A PyTorch-based knowledge distillation toolkit for natural language processing -link: http://textbrewer.hfl-rc.com -summary-home: A PyTorch-based knowledge distillation toolkit for natural language processing -featured-home: false -github-id: airaria/TextBrewer -date-added: 06/02/21 ---- diff --git a/_ecosystem/tiatoolbox b/_ecosystem/tiatoolbox deleted file mode 100644 index d61918d21523..000000000000 --- a/_ecosystem/tiatoolbox +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: TIAToolbox -summary: TIAToolbox provides an easy-to-use API where researchers can use, adapt and create models for CPath. -link: https://github.com/TissueImageAnalytics/tiatoolbox -summary-home: TIAToolbox provides an easy-to-use API where researchers can use, adapt and create models for CPath. -featured-home: false -github-id: TissueImageAnalytics/tiatoolbox -date-added: 6/1/23 ---- diff --git a/_ecosystem/torchdistill b/_ecosystem/torchdistill deleted file mode 100644 index 6224cf04e847..000000000000 --- a/_ecosystem/torchdistill +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: torchdistill -summary: torchdistill is a coding-free framework built on PyTorch for reproducible deep learning and knowledge distillation studies. -link: https://github.com/yoshitomo-matsubara/torchdistill -summary-home: torchdistill is a coding-free framework built on PyTorch for reproducible deep learning and knowledge distillation studies. 
-featured-home: false -github-id: yoshitomo-matsubara/torchdistill -date-added: 12/05/23 ---- diff --git a/_ecosystem/torchdrift b/_ecosystem/torchdrift deleted file mode 100644 index 714e5015e972..000000000000 --- a/_ecosystem/torchdrift +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: TorchDrift -summary: TorchDrift is a data and concept drift library for PyTorch. It lets you monitor your PyTorch models to see if they operate within spec. -link: https://torchdrift.org -summary-home: TorchDrift is a data and concept drift library for PyTorch. It lets you monitor your PyTorch models to see if they operate within spec. -featured-home: false -github-id: TorchDrift/TorchDrift -date-added: 3/31/21 ---- diff --git a/_ecosystem/torchdrug b/_ecosystem/torchdrug deleted file mode 100644 index 840c10f7efbd..000000000000 --- a/_ecosystem/torchdrug +++ /dev/null @@ -1,11 +0,0 @@ ---- -layout: ecosystem_detail -title: torchdrug -summary: A powerful and flexible machine learning platform for drug discovery. -link: https://torchdrug.ai/ -summary-home: A powerful and flexible machine learning platform for drug discovery. -featured-home: false -github-id: DeepGraphLearning/torchdrug -date-added: 08/19/21 ---- - diff --git a/_ecosystem/torchgeo b/_ecosystem/torchgeo deleted file mode 100644 index 32caf26b9ae2..000000000000 --- a/_ecosystem/torchgeo +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: torchgeo -summary: Datasets, transforms, and models for geospatial data -link: https://github.com/microsoft/torchgeo -summary-home: Datasets, transforms, and models for geospatial data -featured-home: false -github-id: microsoft/torchgeo -date-added: 01/05/22 ---- diff --git a/_ecosystem/torchio b/_ecosystem/torchio deleted file mode 100644 index e7caff8c2ccc..000000000000 --- a/_ecosystem/torchio +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: TorchIO -summary: TorchIO is a set of tools to efficiently read, preprocess, sample, augment, and write 3D medical images in deep learning applications written in PyTorch. -link: https://github.com/fepegar/torchio -summary-home: TorchIO is a set of tools to efficiently read, preprocess, sample, augment, and write 3D medical images in deep learning applications written in PyTorch. -featured-home: false -github-id: fepegar/torchio -date-added: 12/06/20 ---- diff --git a/_ecosystem/torchmetrics b/_ecosystem/torchmetrics deleted file mode 100644 index 4a82d19a12d6..000000000000 --- a/_ecosystem/torchmetrics +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: TorchMetrics -summary: Machine learning metrics for distributed, scalable PyTorch applications. -link: https://github.com/PyTorchLightning/metrics -summary-home: Machine learning metrics for distributed, scalable PyTorch applications. -featured-home: false -github-id: PyTorchLightning/metrics -date-added: 06/22/21 ---- diff --git a/_ecosystem/torchopt b/_ecosystem/torchopt deleted file mode 100644 index 7e1500228210..000000000000 --- a/_ecosystem/torchopt +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: TorchOpt -summary: TorchOpt is a PyTorch-based library for efficient differentiable optimization. -link: https://torchopt.readthedocs.io/en/latest/# -summary-home: TorchOpt is a PyTorch-based library for efficient differentiable optimization. 
-featured-home: false -github-id: metaopt/TorchOpt -date-added: 6/1/23 ---- diff --git a/_ecosystem/torchpoints3d b/_ecosystem/torchpoints3d deleted file mode 100644 index 95c9a39efca8..000000000000 --- a/_ecosystem/torchpoints3d +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: PyTorch-Points3d -summary: A PyTorch framework for deep learning on point clouds. -link: https://torch-points3d.readthedocs.io/en/latest/ -summary-home: A PyTorch framework for deep learning on point clouds. -featured-home: false -github-id: nicolas-chaulet/torch-points3d -date-added: 5/20/20 ---- diff --git a/_ecosystem/torchquantum b/_ecosystem/torchquantum deleted file mode 100644 index aff150a369e6..000000000000 --- a/_ecosystem/torchquantum +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: TorchQuantum -summary: TorchQuantum is a quantum classical simulation framework based on PyTorch. It supports statevector, density matrix simulation and pulse simulation on different hardware platforms such as CPUs and GPUs. -link: https://hanruiwanghw.wixsite.com/torchquantum -summary-home: TorchQuantum is a quantum classical simulation framework based on PyTorch. It supports statevector, density matrix simulation and pulse simulation on different hardware platforms such as CPUs and GPUs. -featured-home: false -github-id: mit-han-lab/torchquantum -date-added: 6/1/23 ---- diff --git a/_ecosystem/trains b/_ecosystem/trains deleted file mode 100644 index a328ae5dd84f..000000000000 --- a/_ecosystem/trains +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Clear ML -summary: ClearML is a full system ML / DL experiment manager, versioning and ML-Ops solution. -link: https://github.com/allegroai/trains/ -summary-home: ClearML is a full system ML / DL experiment manager, versioning and ML-Ops solution. -featured-home: false -github-id: allegroai/clearml -date-added: 6/17/20 ---- diff --git a/_ecosystem/transformers b/_ecosystem/transformers deleted file mode 100644 index 34d3f19a3904..000000000000 --- a/_ecosystem/transformers +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Transformers -summary: State-of-the-art Natural Language Processing for PyTorch. -link: https://github.com/huggingface/transformers -summary-home: State-of-the-art Natural Language Processing for PyTorch. -featured-home: false -github-id: huggingface/transformers -date-added: 01/18/21 ---- diff --git a/_ecosystem/trtorch b/_ecosystem/trtorch deleted file mode 100644 index fcdeaf03b162..000000000000 --- a/_ecosystem/trtorch +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: Torch-TensorRT -summary: PyTorch/TorchScript compiler for NVIDIA GPUs using TensorRT -link: https://pytorch.org/TensorRT/ -summary-home: PyTorch/TorchScript compiler for NVIDIA GPUs using TensorRT -featured-home: false -github-id: NVIDIA/Torch-TensorRT -date-added: 03/28/22 ---- diff --git a/_ecosystem/usb b/_ecosystem/usb deleted file mode 100644 index 73b8de9a20be..000000000000 --- a/_ecosystem/usb +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: USB -summary: USB is a Pytorch-based Python package for Semi-Supervised Learning (SSL). It is easy-to-use/extend, affordable to small groups, and comprehensive for developing and evaluating SSL algorithms. -link: https://usb.readthedocs.io/ -summary-home: USB is a Pytorch-based Python package for Semi-Supervised Learning (SSL). 
It is easy-to-use/extend, affordable to small groups, and comprehensive for developing and evaluating SSL algorithms. -featured-home: false -github-id: microsoft/Semi-supervised-learning -date-added: 6/1/23 ---- diff --git a/_ecosystem/vissl b/_ecosystem/vissl deleted file mode 100644 index 12329124081f..000000000000 --- a/_ecosystem/vissl +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: VISSL -summary: A library for state-of-the-art self-supervised learning -link: https://vissl.ai/ -summary-home: A library for state-of-the-art self-supervised learning -featured-home: false -github-id: facebookresearch/vissl -date-added: 2/1/21 ---- diff --git a/_ecosystem/vllm b/_ecosystem/vllm deleted file mode 100644 index 0c510878f4d7..000000000000 --- a/_ecosystem/vllm +++ /dev/null @@ -1,10 +0,0 @@ ---- -layout: ecosystem_detail -title: vllm -summary: vllm is a high-throughput and memory-efficient inference and serving engine for LLMs. -link: https://github.com/vllm-project/vllm -summary-home: vllm is a high-throughput and memory-efficient inference and serving engine for LLMs. -featured-home: false -github-id: vllm-project/vllm -date-added: 12/3/24 ---- diff --git a/_features/cloud-support.md b/_features/cloud-support.md deleted file mode 100644 index 82d9d66911c3..000000000000 --- a/_features/cloud-support.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Cloud Support -order: 8 -snippet: > - ```sh - export IMAGE_FAMILY="pytorch-latest-cpu" - export ZONE="us-west1-b" - export INSTANCE_NAME="my-instance" - - gcloud compute instances create $INSTANCE_NAME \ - --zone=$ZONE \ - --image-family=$IMAGE_FAMILY \ - --image-project=deeplearning-platform-release - ``` - -summary-home: PyTorch is well supported on major cloud platforms, providing frictionless development and easy scaling. -featured-home: true - ---- - -PyTorch is well supported on major cloud platforms, providing frictionless development and easy scaling through prebuilt images, large scale training on GPUs, ability to run models in a production scale environment, and more. diff --git a/_features/cplusplus-front-end.md b/_features/cplusplus-front-end.md deleted file mode 100644 index f0a46ca2cadb..000000000000 --- a/_features/cplusplus-front-end.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: C++ Front-End -order: 7 -snippet: > - ```cpp - #include - - torch::nn::Linear model(num_features, 1); - torch::optim::SGD optimizer(model->parameters()); - auto data_loader = torch::data::data_loader(dataset); - - for (size_t epoch = 0; epoch < 10; ++epoch) { - for (auto batch : data_loader) { - auto prediction = model->forward(batch.data); - auto loss = loss_function(prediction, batch.target); - loss.backward(); - optimizer.step(); - } - } - ``` ---- - -The C++ frontend is a pure C++ interface to PyTorch that follows the design and architecture of the established Python frontend. It is intended to enable research in high performance, low latency and bare metal C++ applications. 
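For comparison with the C++ snippet above, here is a minimal sketch of the equivalent eager-mode training loop on the Python side, whose design the C++ front-end mirrors. The feature count, the synthetic `TensorDataset`, the learning rate, and the MSE loss below are illustrative placeholders rather than part of the original page.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

num_features = 10  # placeholder, standing in for `num_features` in the C++ snippet
dataset = TensorDataset(torch.randn(64, num_features), torch.randn(64, 1))
data_loader = DataLoader(dataset, batch_size=8)

model = torch.nn.Linear(num_features, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
loss_function = torch.nn.MSELoss()

for epoch in range(10):
    for data, target in data_loader:
        optimizer.zero_grad()
        prediction = model(data)                  # forward pass
        loss = loss_function(prediction, target)  # compute the loss
        loss.backward()                           # backward pass
        optimizer.step()                          # parameter update
```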
diff --git a/_features/distributed-training.md b/_features/distributed-training.md deleted file mode 100644 index b7e5ad35ad8b..000000000000 --- a/_features/distributed-training.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Distributed Training -order: 3 -snippet: > - ```python - import torch.distributed as dist - from torch.nn.parallel import DistributedDataParallel - - dist.init_process_group(backend='gloo') - model = DistributedDataParallel(model) - ``` - -summary-home: Scalable distributed training and performance optimization in research and production is enabled by the torch.distributed backend. -featured-home: true - ---- - -Optimize performance in both research and production by taking advantage of native support for asynchronous execution of collective operations and peer-to-peer communication that is accessible from Python and C++. diff --git a/_features/mobile.md b/_features/mobile.md deleted file mode 100644 index 31214d52751f..000000000000 --- a/_features/mobile.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Mobile (Experimental) -order: 4 -snippet: > - ```python - ## Save your model - torch.jit.script(model).save("my_mobile_model.pt") - - ## iOS prebuilt binary - pod ‘LibTorch’ - ## Android prebuilt binary - implementation 'org.pytorch:pytorch_android:1.3.0' - - ## Run your model (Android example) - Tensor input = Tensor.fromBlob(data, new long[]{1, data.length}); - IValue output = module.forward(IValue.tensor(input)); - float[] scores = output.getTensor().getDataAsFloatArray(); - ``` - -summary-home: PyTorch supports an end-to-end workflow from Python to deployment on iOS and Android. It extends the PyTorch API to cover common preprocessing and integration tasks needed for incorporating ML in mobile applications. -featured-home: false - ---- - -PyTorch supports an end-to-end workflow from Python to deployment on iOS and Android. It extends the PyTorch API to cover common preprocessing and integration tasks needed for incorporating ML in mobile applications. diff --git a/_features/native-onnx-support.md b/_features/native-onnx-support.md deleted file mode 100644 index 1c7734e9ed77..000000000000 --- a/_features/native-onnx-support.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Native ONNX Support -order: 6 -snippet: > - ```python - import torch.onnx - import torchvision - - dummy_input = torch.randn(1, 3, 224, 224) - model = torchvision.models.alexnet(pretrained=True) - torch.onnx.export(model, dummy_input, "alexnet.onnx") - ``` ---- - -Export models in the standard ONNX (Open Neural Network Exchange) format for direct access to ONNX-compatible platforms, runtimes, visualizers, and more. 
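As a follow-up to the export call above, a minimal sketch of running the resulting file with ONNX Runtime; it assumes the `onnxruntime` and `numpy` packages are installed and that `alexnet.onnx` was produced as shown.

```python
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("alexnet.onnx")
input_name = session.get_inputs()[0].name   # query the graph for the input name assigned at export time
dummy_input = np.random.randn(1, 3, 224, 224).astype(np.float32)
outputs = session.run(None, {input_name: dummy_input})
print(outputs[0].shape)                     # (1, 1000): AlexNet class scores
```

Reading the input name from the session avoids hard-coding whatever name `torch.onnx.export` assigned to the graph input.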
diff --git a/_features/production-ready.md b/_features/production-ready.md deleted file mode 100644 index 151de0f9b644..000000000000 --- a/_features/production-ready.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Production Ready -order: 1 -snippet: > - ```python - import torch - class MyModule(torch.nn.Module): - - def __init__(self, N, M): - super(MyModule, self).__init__() - self.weight = torch.nn.Parameter(torch.rand(N, M)) - - def forward(self, input): - if input.sum() > 0: - output = self.weight.mv(input) - else: - output = self.weight + input - return output - - # Compile the model code to a static representation - my_script_module = torch.jit.script(MyModule(3, 4)) - - # Save the compiled code and model data so it can be loaded elsewhere - my_script_module.save("my_script_module.pt") - ``` - -summary-home: Transition seamlessly between eager and graph modes with TorchScript, and accelerate the path to production with TorchServe. -featured-home: true - ---- - -With TorchScript, PyTorch provides ease-of-use and flexibility in eager mode, while seamlessly transitioning to graph mode for speed, optimization, and functionality in C++ runtime environments. diff --git a/_features/robust-ecosystem.md b/_features/robust-ecosystem.md deleted file mode 100644 index f44406d6e801..000000000000 --- a/_features/robust-ecosystem.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Robust Ecosystem -order: 5 -snippet: > - ```python - import torchvision.models as models - resnet18 = models.resnet18(pretrained=True) - alexnet = models.alexnet(pretrained=True) - squeezenet = models.squeezenet1_0(pretrained=True) - vgg16 = models.vgg16(pretrained=True) - densenet = models.densenet161(pretrained=True) - inception = models.inception_v3(pretrained=True) - ``` - -summary-home: A rich ecosystem of tools and libraries extends PyTorch and supports development in computer vision, NLP and more. -featured-home: true - ---- - -An active community of researchers and developers have built a rich ecosystem of tools and libraries for extending PyTorch and supporting development in areas from computer vision to reinforcement learning. diff --git a/_features/torchserve.md b/_features/torchserve.md deleted file mode 100644 index 4460014cd541..000000000000 --- a/_features/torchserve.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: TorchServe -order: 2 -snippet: > - ```python - ## Convert the model from PyTorch to TorchServe format - torch-model-archiver --model-name densenet161 \ - --version 1.0 --model-file serve/examples/image_classifier/densenet_161/model.py \ - --serialized-file densenet161-8d451a50.pth \ - --extra-files serve/examples/image_classifier/index_to_name.json \ - --handler image_classifier - - ## Host your PyTorch model - - torchserve --start --model-store model_store --models densenet161=densenet161.mar - ``` - -summary-home: TorchServe is an easy to use tool for deploying PyTorch models at scale. It is cloud and environment agnostic and supports features such as multi-model serving, logging, metrics and the creation of RESTful endpoints for application integration. -featured-home: false - ---- - -TorchServe is an easy to use tool for deploying PyTorch models at scale. It is cloud and environment agnostic and supports features such as multi-model serving, logging, metrics and the creation of RESTful endpoints for application integration. 
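Once the model is being served as in the snippet above, it can be queried over TorchServe's inference API. The sketch below assumes the `requests` package, the default inference port (8080), a local TorchServe instance with the `densenet161` model registered, and a test image named `kitten.jpg`.

```python
import requests

# POST the raw image bytes to the prediction endpoint of the registered model.
with open("kitten.jpg", "rb") as f:
    response = requests.post("http://127.0.0.1:8080/predictions/densenet161", data=f)

print(response.json())  # class labels and confidence scores from the image_classifier handler
```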
diff --git a/_get_started/get-started-locally.md b/_get_started/get-started-locally.md deleted file mode 100644 index 6a95566a3946..000000000000 --- a/_get_started/get-started-locally.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -layout: get_started -title: Start Locally -permalink: /get-started/locally/ -background-class: get-started-background -body-class: get-started -order: 0 -published: true -get-started-locally: true -redirect_from: "/get-started/" ---- - -## Start Locally - -
-
-
- {% include quick_start_local.html %} -
-
-
- ---- - -{% capture mac %} -{% include_relative installation/mac.md %} -{% endcapture %} - -{% capture linux %} -{% include_relative installation/linux.md %} -{% endcapture %} - -{% capture windows %} -{% include_relative installation/windows.md %} -{% endcapture %} - - -
-
{{ mac | markdownify }}
-
{{ linux | markdownify }}
-
{{ windows | markdownify }}
-
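A minimal post-install sanity check (a sketch, not part of the page markup above): it confirms the freshly installed package imports, constructs a tensor, and reports whether a CUDA device is visible.

```python
import torch

x = torch.rand(5, 3)
print(x)                          # a random 5x3 tensor confirms the install works
print(torch.__version__)          # the installed PyTorch version
print(torch.cuda.is_available())  # True only if a working CUDA setup is detected
```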
- - - - - diff --git a/_get_started/get-started-via-cloud-partners.md b/_get_started/get-started-via-cloud-partners.md deleted file mode 100644 index 6fba614843af..000000000000 --- a/_get_started/get-started-via-cloud-partners.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -layout: get_started -title: Start via Cloud Partners -permalink: /get-started/cloud-partners/ -background-class: get-started-background -body-class: get-started -order: 3 -published: true -get-started-via-cloud: true ---- - -## Start via Cloud Partners - -
-
-

Cloud platforms provide powerful hardware and infrastructure for training and deploying deep learning models. Select a cloud platform below to get started with PyTorch.

- {% include quick_start_cloud_options.html %} -
-
- ---- - -{% capture aws %} -{% include_relative installation/aws.md %} -{% endcapture %} - -{% capture azure %} -{% include_relative installation/azure.md %} -{% endcapture %} - -{% capture google-cloud %} -{% include_relative installation/google-cloud.md %} -{% endcapture %} - -{% capture lightning-studios %} -{% include_relative installation/lightning-studios.md %} -{% endcapture %} - -
-
{{aws | markdownify }}
-
{{google-cloud | markdownify }}
-
{{azure | markdownify }}
-
{{lightning-studios | markdownify }}
-
- - - - - diff --git a/_get_started/get-started-via-colab.md b/_get_started/get-started-via-colab.md deleted file mode 100644 index 940ef0bc8a56..000000000000 --- a/_get_started/get-started-via-colab.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -layout: get_started -title: Try Now via CoLab -permalink: /get-started/colab/ -background-class: get-started-background -body-class: get-started -order: 10 ---- - -## Try Now via CoLab - -Lorem ipsum dolor sit amet, ex mei graeco alienum imperdiet. Recusabo consequuntur mei ei, habeo iriure virtute eam cu, in erat placerat vis. Eu mea nostrum inimicus, cum id aeque utamur erroribus. - -Lorem ipsum dolor sit amet, ex mei graeco alienum imperdiet. Recusabo consequuntur mei ei, habeo iriure virtute eam cu, in erat placerat vis. Eu mea nostrum inimicus, cum id aeque utamur erroribus. - -{% highlight python %} -#!/usr/bin/python3 - -# Print the contents of the files listed on the command line. - -import sys - -for fn in sys.argv[1:]: - try: - fin = open(fn, 'r') - except: - (type, detail) = sys.exc_info()[:2] - print("\n*** %s: %s: %s ***" % (fn, type, detail)) - continue - print("\n*** Contents of", fn, "***") - - # Print the file, with line numbers. - lno = 1 - while 1: - line = fin.readline() - if not line: break; - print('%3d: %-s' % (lno, line[:-1])) - lno = lno + 1 - fin.close() -print() -{% endhighlight %} - -Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. - - - - - diff --git a/_get_started/mobile.md b/_get_started/mobile.md deleted file mode 100644 index d709ee61e2f8..000000000000 --- a/_get_started/mobile.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -layout: get_started -title: PyTorch for Edge -permalink: /get-started/executorch/ -background-class: get-started-background -body-class: get-started -order: 5 -published: true ---- - -## Get Started with PyTorch ExecuTorch - -PyTorch’s edge specific library is [ExecuTorch](https://github.com/pytorch/executorch/) and is designed to be lightweight, very performant even on devices with constrained hardware such as mobile phones, embedded systems and microcontrollers. - -ExecuTorch relies heavily on PyTorch core technologies such as [torch.compile](https://pytorch.org/docs/stable/torch.compiler.html) and [torch.export](https://pytorch.org/docs/stable/export.html), and should be very familiar to anyone who has used PyTorch in the past. - -### Getting Started -You can get started by following the [general getting started guide](https://pytorch.org/executorch/stable/getting-started.html#) or jump to the specific steps for your target device. - -* [Using ExecuTorch on Android](https://pytorch.org/executorch/stable/using-executorch-android.html) -* [Using ExecuTorch on iOS](https://pytorch.org/executorch/stable/using-executorch-ios.html) -* [Using ExecuTorch with C++](https://pytorch.org/executorch/stable/using-executorch-cpp.html) - -### Hardware Acceleration -ExecuTorch provides out of the box hardware acceleration for a growing number of chip manufacturers. 
See the following resources to learn more about how to leverage them: - -* [Backend Overview](https://pytorch.org/executorch/stable/backends-overview.html) -* [XNNPACK](https://pytorch.org/executorch/stable/backends-xnnpack.html) -* [Core ML](https://pytorch.org/executorch/stable/backends-coreml.html) -* [MPS](https://pytorch.org/executorch/stable/backends-mps.html) -* [Vulkan](https://pytorch.org/executorch/stable/backends-vulkan.html) -* [ARM Ethos-U](https://pytorch.org/executorch/stable/backends-arm-ethos-u.html) -* [Qualcomm AI Engine](https://pytorch.org/executorch/stable/backends-qualcomm.html) -* [MediaTek](https://pytorch.org/executorch/stable/backends-mediatek.html) -* [Cadence Xtensa](https://pytorch.org/executorch/stable/backends-cadence.html) - - - - diff --git a/_get_started/previous-versions.md b/_get_started/previous-versions.md deleted file mode 100644 index d86ae87de17e..000000000000 --- a/_get_started/previous-versions.md +++ /dev/null @@ -1,1980 +0,0 @@ ---- -layout: get_started -title: Previous PyTorch Versions -permalink: /get-started/previous-versions/ -background-class: get-started-background -body-class: get-started -order: 4 -published: true -redirect_from: /previous-versions.html ---- - -## Installing previous versions of PyTorch - -We'd prefer you install the [latest version](https://pytorch.org/get-started/locally), -but old binaries and installation instructions are provided below for -your convenience. - -## Commands for Versions >= 1.0.0 - -### v2.6.0 - -#### Wheel - -##### OSX - -``` -pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 -``` - -##### Linux and Windows - -``` -# ROCM 6.1 (Linux only) -pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/rocm6.1 -# ROCM 6.2.4 (Linux only) -pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/rocm6.2.4 -# CUDA 11.8 -pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/cu118 -# CUDA 12.4 -pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/cu124 -# CUDA 12.6 -pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/cu126 -# CPU only -pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/cpu -``` - -### v2.5.1 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.8 -conda install pytorch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 pytorch-cuda=11.8 -c pytorch -c nvidia -# CUDA 12.1 -conda install pytorch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 pytorch-cuda=12.1 -c pytorch -c nvidia -# CUDA 12.4 -conda install pytorch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 pytorch-cuda=12.4 -c pytorch -c nvidia -# CPU Only -conda install pytorch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 -``` - -##### Linux and Windows - -``` -# ROCM 6.1 (Linux only) -pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/rocm6.1 -# ROCM 6.2 (Linux only) -pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/rocm6.2 -# CUDA 11.8 -pip install torch==2.5.1 
torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu118 -# CUDA 12.1 -pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu121 -# CUDA 12.4 -pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu124 -# CPU only -pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cpu -``` - -### v2.5.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.8 -conda install pytorch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 pytorch-cuda=11.8 -c pytorch -c nvidia -# CUDA 12.1 -conda install pytorch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 pytorch-cuda=12.1 -c pytorch -c nvidia -# CUDA 12.4 -conda install pytorch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 pytorch-cuda=12.4 -c pytorch -c nvidia -# CPU Only -conda install pytorch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 -``` - -##### Linux and Windows - -``` -# ROCM 6.1 (Linux only) -pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/rocm6.1 -# ROCM 6.2 (Linux only) -pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/rocm6.2 -# CUDA 11.8 -pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/cu118 -# CUDA 12.1 -pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/cu121 -# CUDA 12.4 -pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/cu124 -# CPU only -pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/cpu -``` - -### v2.4.1 -#### Conda -##### OSX -``` -# conda -conda install pytorch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 -c pytorch -``` -##### Linux and Windows -``` -# CUDA 11.8 -conda install pytorch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 pytorch-cuda=11.8 -c pytorch -c nvidia -# CUDA 12.1 -conda install pytorch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 pytorch-cuda=12.1 -c pytorch -c nvidia -# CUDA 12.4 -conda install pytorch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 pytorch-cuda=12.4 -c pytorch -c nvidia -# CPU Only -conda install pytorch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 cpuonly -c pytorch -``` -#### Wheel -##### OSX -``` -pip install torch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 -``` -##### Linux and Windows -``` -# ROCM 6.1 (Linux only) -pip install torch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 --index-url https://download.pytorch.org/whl/rocm6.1 -# CUDA 11.8 -pip install torch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 --index-url https://download.pytorch.org/whl/cu118 -# CUDA 12.1 -pip install torch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 --index-url https://download.pytorch.org/whl/cu121 -# CUDA 12.4 -pip install torch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 --index-url https://download.pytorch.org/whl/cu124 -# CPU only -pip install torch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 --index-url https://download.pytorch.org/whl/cpu -``` - -### v2.4.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==2.4.0 
torchvision==0.19.0 torchaudio==2.4.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.8 -conda install pytorch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 pytorch-cuda=11.8 -c pytorch -c nvidia -# CUDA 12.1 -conda install pytorch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 pytorch-cuda=12.1 -c pytorch -c nvidia -# CUDA 12.4 -conda install pytorch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 pytorch-cuda=12.4 -c pytorch -c nvidia -# CPU Only -conda install pytorch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 -``` - -##### Linux and Windows - -``` -# ROCM 6.1 (Linux only) -pip install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/rocm6.1 -# CUDA 11.8 -pip install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cu118 -# CUDA 12.1 -pip install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cu121 -# CUDA 12.4 -pip install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cu124 -# CPU only -pip install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cpu -``` - -### v2.3.1 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.8 -conda install pytorch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 pytorch-cuda=11.8 -c pytorch -c nvidia -# CUDA 12.1 -conda install pytorch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 pytorch-cuda=12.1 -c pytorch -c nvidia -# CPU Only -conda install pytorch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 -``` - -##### Linux and Windows - -``` -# ROCM 6.0 (Linux only) -pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/rocm6.0 -# CUDA 11.8 -pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cu118 -# CUDA 12.1 -pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cu121 -# CPU only -pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cpu -``` - -### v2.3.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.8 -conda install pytorch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 pytorch-cuda=11.8 -c pytorch -c nvidia -# CUDA 12.1 -conda install pytorch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 pytorch-cuda=12.1 -c pytorch -c nvidia -# CPU Only -conda install pytorch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 -``` - -##### Linux and Windows - -``` -# ROCM 6.0 (Linux only) -pip install torch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/rocm6.0 -# CUDA 11.8 -pip install torch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/cu118 -# CUDA 12.1 -pip install torch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 --index-url 
https://download.pytorch.org/whl/cu121 -# CPU only -pip install torch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/cpu -``` - -### v2.2.2 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.8 -conda install pytorch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 pytorch-cuda=11.8 -c pytorch -c nvidia -# CUDA 12.1 -conda install pytorch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 pytorch-cuda=12.1 -c pytorch -c nvidia -# CPU Only -conda install pytorch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 -``` - -##### Linux and Windows - -``` -# ROCM 5.7 (Linux only) -pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/rocm5.7 -# CUDA 11.8 -pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cu118 -# CUDA 12.1 -pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cu121 -# CPU only -pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cpu -``` - -### v2.2.1 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.8 -conda install pytorch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 pytorch-cuda=11.8 -c pytorch -c nvidia -# CUDA 12.1 -conda install pytorch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 pytorch-cuda=12.1 -c pytorch -c nvidia -# CPU Only -conda install pytorch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 -``` - -##### Linux and Windows - -``` -# ROCM 5.7 (Linux only) -pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/rocm5.7 -# CUDA 11.8 -pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu118 -# CUDA 12.1 -pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu121 -# CPU only -pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cpu -``` - -### v2.2.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.8 -conda install pytorch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 pytorch-cuda=11.8 -c pytorch -c nvidia -# CUDA 12.1 -conda install pytorch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 pytorch-cuda=12.1 -c pytorch -c nvidia -# CPU Only -conda install pytorch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 -``` - -##### Linux and Windows - -``` -# ROCM 5.6 (Linux only) -pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 --index-url https://download.pytorch.org/whl/rocm5.6 -# CUDA 11.8 -pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 --index-url https://download.pytorch.org/whl/cu118 -# CUDA 12.1 -pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 --index-url 
https://download.pytorch.org/whl/cu121 -# CPU only -pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 --index-url https://download.pytorch.org/whl/cpu -``` - -### v2.1.2 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.8 -conda install pytorch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 pytorch-cuda=11.8 -c pytorch -c nvidia -# CUDA 12.1 -conda install pytorch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 pytorch-cuda=12.1 -c pytorch -c nvidia -# CPU Only -conda install pytorch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 -``` - -##### Linux and Windows - -``` -# ROCM 5.6 (Linux only) -pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 --index-url https://download.pytorch.org/whl/rocm5.6 -# CUDA 11.8 -pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 --index-url https://download.pytorch.org/whl/cu118 -# CUDA 12.1 -pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 --index-url https://download.pytorch.org/whl/cu121 -# CPU only -pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 --index-url https://download.pytorch.org/whl/cpu -``` - -### v2.1.1 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.8 -conda install pytorch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 pytorch-cuda=11.8 -c pytorch -c nvidia -# CUDA 12.1 -conda install pytorch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 pytorch-cuda=12.1 -c pytorch -c nvidia -# CPU Only -conda install pytorch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 -``` - -##### Linux and Windows - -``` -# ROCM 5.6 (Linux only) -pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/rocm5.6 -# CUDA 11.8 -pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/cu118 -# CUDA 12.1 -pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/cu121 -# CPU only -pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/cpu -``` - -### v2.1.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.8 -conda install pytorch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 pytorch-cuda=11.8 -c pytorch -c nvidia -# CUDA 12.1 -conda install pytorch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 pytorch-cuda=12.1 -c pytorch -c nvidia -# CPU Only -conda install pytorch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 -``` - -##### Linux and Windows - -``` -# ROCM 5.6 (Linux only) -pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/rocm5.6 -# CUDA 11.8 -pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cu118 -# CUDA 12.1 -pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url 
https://download.pytorch.org/whl/cu121 -# CPU only -pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cpu -``` - -### v2.0.1 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.7 -conda install pytorch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 pytorch-cuda=11.7 -c pytorch -c nvidia -# CUDA 11.8 -conda install pytorch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 pytorch-cuda=11.8 -c pytorch -c nvidia -# CPU Only -conda install pytorch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 -``` - -##### Linux and Windows - -``` -# ROCM 5.4.2 (Linux only) -pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/rocm5.4.2 -# CUDA 11.7 -pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 -# CUDA 11.8 -pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118 -# CPU only -pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cpu -``` - -### v2.0.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==2.0.0 torchvision==0.15.0 torchaudio==2.0.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.7 -conda install pytorch==2.0.0 torchvision==0.15.0 torchaudio==2.0.0 pytorch-cuda=11.7 -c pytorch -c nvidia -# CUDA 11.8 -conda install pytorch==2.0.0 torchvision==0.15.0 torchaudio==2.0.0 pytorch-cuda=11.8 -c pytorch -c nvidia -# CPU Only -conda install pytorch==2.0.0 torchvision==0.15.0 torchaudio==2.0.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1 -``` - -##### Linux and Windows - -``` -# ROCM 5.4.2 (Linux only) -pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1 --index-url https://download.pytorch.org/whl/rocm5.4.2 -# CUDA 11.7 -pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1 -# CUDA 11.8 -pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1 --index-url https://download.pytorch.org/whl/cu118 -# CPU only -pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1 --index-url https://download.pytorch.org/whl/cpu -``` - -### v1.13.1 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.6 -conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 pytorch-cuda=11.6 -c pytorch -c nvidia -# CUDA 11.7 -conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 pytorch-cuda=11.7 -c pytorch -c nvidia -# CPU Only -conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 -``` - -##### Linux and Windows - -``` -# ROCM 5.2 (Linux only) -pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/rocm5.2 -# CUDA 11.6 -pip install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 -# CUDA 11.7 -pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 torchaudio==0.13.1 --extra-index-url 
https://download.pytorch.org/whl/cu117 -# CPU only -pip install torch==1.13.1+cpu torchvision==0.14.1+cpu torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cpu -``` - -### v1.13.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.13.0 torchvision==0.14.0 torchaudio==0.13.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 11.6 -conda install pytorch==1.13.0 torchvision==0.14.0 torchaudio==0.13.0 pytorch-cuda=11.6 -c pytorch -c nvidia -# CUDA 11.7 -conda install pytorch==1.13.0 torchvision==0.14.0 torchaudio==0.13.0 pytorch-cuda=11.7 -c pytorch -c nvidia -# CPU Only -conda install pytorch==1.13.0 torchvision==0.14.0 torchaudio==0.13.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.13.0 torchvision==0.14.0 torchaudio==0.13.0 -``` - -##### Linux and Windows - -``` -# ROCM 5.2 (Linux only) -pip install torch==1.13.0+rocm5.2 torchvision==0.14.0+rocm5.2 torchaudio==0.13.0 --extra-index-url https://download.pytorch.org/whl/rocm5.2 -# CUDA 11.6 -pip install torch==1.13.0+cu116 torchvision==0.14.0+cu116 torchaudio==0.13.0 --extra-index-url https://download.pytorch.org/whl/cu116 -# CUDA 11.7 -pip install torch==1.13.0+cu117 torchvision==0.14.0+cu117 torchaudio==0.13.0 --extra-index-url https://download.pytorch.org/whl/cu117 -# CPU only -pip install torch==1.13.0+cpu torchvision==0.14.0+cpu torchaudio==0.13.0 --extra-index-url https://download.pytorch.org/whl/cpu -``` - -### v1.12.1 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 10.2 -conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=10.2 -c pytorch -# CUDA 11.3 -conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=11.3 -c pytorch -# CUDA 11.6 -conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=11.6 -c pytorch -c conda-forge -# CPU Only -conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 -``` - -##### Linux and Windows - -``` -# ROCM 5.1.1 (Linux only) -pip install torch==1.12.1+rocm5.1.1 torchvision==0.13.1+rocm5.1.1 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/rocm5.1.1 -# CUDA 11.6 -pip install torch==1.12.1+cu116 torchvision==0.13.1+cu116 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu116 -# CUDA 11.3 -pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113 -# CUDA 10.2 -pip install torch==1.12.1+cu102 torchvision==0.13.1+cu102 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu102 -# CPU only -pip install torch==1.12.1+cpu torchvision==0.13.1+cpu torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cpu -``` - -### v1.12.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 10.2 -conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 cudatoolkit=10.2 -c pytorch -# CUDA 11.3 -conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 cudatoolkit=11.3 -c pytorch -# CUDA 11.6 -conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 cudatoolkit=11.6 -c pytorch -c conda-forge -# CPU Only -conda install 
pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 -``` - -##### Linux and Windows - -``` -# ROCM 5.1.1 (Linux only) -pip install torch==1.12.0+rocm5.1.1 torchvision==0.13.0+rocm5.1.1 torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/rocm5.1.1 -# CUDA 11.6 -pip install torch==1.12.0+cu116 torchvision==0.13.0+cu116 torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cu116 -# CUDA 11.3 -pip install torch==1.12.0+cu113 torchvision==0.13.0+cu113 torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cu113 -# CUDA 10.2 -pip install torch==1.12.0+cu102 torchvision==0.13.0+cu102 torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cu102 -# CPU only -pip install torch==1.12.0+cpu torchvision==0.13.0+cpu torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cpu -``` - -### v1.11.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 10.2 -conda install pytorch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0 cudatoolkit=10.2 -c pytorch - -# CUDA 11.3 -conda install pytorch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0 cudatoolkit=11.3 -c pytorch - -# CPU Only -conda install pytorch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0 -``` - -##### Linux and Windows - -``` -# ROCM 4.5.2 (Linux only) -pip install torch==1.11.0+rocm4.5.2 torchvision==0.12.0+rocm4.5.2 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/rocm4.5.2 - -# CUDA 11.3 -pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cu113 - -# CUDA 10.2 -pip install torch==1.11.0+cu102 torchvision==0.12.0+cu102 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cu102 - -# CPU only -pip install torch==1.11.0+cpu torchvision==0.12.0+cpu torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cpu -``` - -### v1.10.1 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.10.1 torchvision==0.11.2 torchaudio==0.10.1 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 10.2 -conda install pytorch==1.10.1 torchvision==0.11.2 torchaudio==0.10.1 cudatoolkit=10.2 -c pytorch - -# CUDA 11.3 -conda install pytorch==1.10.1 torchvision==0.11.2 torchaudio==0.10.1 cudatoolkit=11.3 -c pytorch -c conda-forge - -# CPU Only -conda install pytorch==1.10.1 torchvision==0.11.2 torchaudio==0.10.1 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.10.1 torchvision==0.11.2 torchaudio==0.10.1 -``` - -##### Linux and Windows - -``` -# ROCM 4.2 (Linux only) -pip install torch==1.10.1+rocm4.2 torchvision==0.11.2+rocm4.2 torchaudio==0.10.1 -f https://download.pytorch.org/whl/rocm4.2/torch_stable.html - -# ROCM 4.1 (Linux only) -pip install torch==1.10.1+rocm4.1 torchvision==0.11.2+rocm4.1 torchaudio==0.10.1 -f https://download.pytorch.org/whl/torch_stable.html - -# ROCM 4.0.1 (Linux only) -pip install torch==1.10.1+rocm4.0.1 torchvision==0.10.2+rocm4.0.1 torchaudio==0.10.1 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 11.1 -pip install torch==1.10.1+cu111 torchvision==0.11.2+cu111 torchaudio==0.10.1 -f 
https://download.pytorch.org/whl/cu111/torch_stable.html - -# CUDA 10.2 -pip install torch==1.10.1+cu102 torchvision==0.11.2+cu102 torchaudio==0.10.1 -f https://download.pytorch.org/whl/cu102/torch_stable.html - -# CPU only -pip install torch==1.10.1+cpu torchvision==0.11.2+cpu torchaudio==0.10.1 -f https://download.pytorch.org/whl/cpu/torch_stable.html -``` - - -### v1.10.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.10.0 torchvision==0.11.0 torchaudio==0.10.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 10.2 -conda install pytorch==1.10.0 torchvision==0.11.0 torchaudio==0.10.0 cudatoolkit=10.2 -c pytorch - -# CUDA 11.3 -conda install pytorch==1.10.0 torchvision==0.11.0 torchaudio==0.10.0 cudatoolkit=11.3 -c pytorch -c conda-forge - -# CPU Only -conda install pytorch==1.10.0 torchvision==0.11.0 torchaudio==0.10.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.10.0 torchvision==0.11.0 torchaudio==0.10.0 -``` - -##### Linux and Windows - -``` -# ROCM 4.2 (Linux only) -pip install torch==1.10.0+rocm4.2 torchvision==0.11.0+rocm4.2 torchaudio==0.10.0 -f https://download.pytorch.org/whl/torch_stable.html - -# ROCM 4.1 (Linux only) -pip install torch==1.10.0+rocm4.1 torchvision==0.11.0+rocm4.1 torchaudio==0.10.0 -f https://download.pytorch.org/whl/torch_stable.html - -# ROCM 4.0.1 (Linux only) -pip install torch==1.10.0+rocm4.0.1 torchvision==0.10.1+rocm4.0.1 torchaudio==0.10.0 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 11.1 -pip install torch==1.10.0+cu111 torchvision==0.11.0+cu111 torchaudio==0.10.0 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 10.2 -pip install torch==1.10.0+cu102 torchvision==0.11.0+cu102 torchaudio==0.10.0 -f https://download.pytorch.org/whl/torch_stable.html - -# CPU only -pip install torch==1.10.0+cpu torchvision==0.11.0+cpu torchaudio==0.10.0 -f https://download.pytorch.org/whl/torch_stable.html -``` - - -### v1.9.1 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.9.1 torchvision==0.10.1 torchaudio==0.9.1 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 10.2 -conda install pytorch==1.9.1 torchvision==0.10.1 torchaudio==0.9.1 cudatoolkit=10.2 -c pytorch - -# CUDA 11.3 -conda install pytorch==1.9.1 torchvision==0.10.1 torchaudio==0.9.1 cudatoolkit=11.3 -c pytorch -c conda-forge - -# CPU Only -conda install pytorch==1.9.1 torchvision==0.10.1 torchaudio==0.9.1 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.9.1 torchvision==0.10.1 torchaudio==0.9.1 -``` - -##### Linux and Windows - -``` -# ROCM 4.2 (Linux only) -pip install torch==1.9.1+rocm4.2 torchvision==0.10.1+rocm4.2 torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html - -# ROCM 4.1 (Linux only) -pip install torch==1.9.1+rocm4.1 torchvision==0.10.1+rocm4.1 torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html - -# ROCM 4.0.1 (Linux only) -pip install torch==1.9.1+rocm4.0.1 torchvision==0.10.1+rocm4.0.1 torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 11.1 -pip install torch==1.9.1+cu111 torchvision==0.10.1+cu111 torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 10.2 -pip install torch==1.9.1+cu102 torchvision==0.10.1+cu102 torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html - -# CPU only -pip install torch==1.9.1+cpu torchvision==0.10.1+cpu torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html -``` - -### v1.9.0 - 
-#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.9.0 torchvision==0.10.0 torchaudio==0.9.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 10.2 -conda install pytorch==1.9.0 torchvision==0.10.0 torchaudio==0.9.0 cudatoolkit=10.2 -c pytorch - -# CUDA 11.3 -conda install pytorch==1.9.0 torchvision==0.10.0 torchaudio==0.9.0 cudatoolkit=11.3 -c pytorch -c conda-forge - -# CPU Only -conda install pytorch==1.9.0 torchvision==0.10.0 torchaudio==0.9.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.9.0 torchvision==0.10.0 torchaudio==0.9.0 -``` - -##### Linux and Windows - -``` -# ROCM 4.2 (Linux only) -pip install torch==1.9.0+rocm4.2 torchvision==0.10.0+rocm4.2 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html - -# ROCM 4.1 (Linux only) -pip install torch==1.9.0+rocm4.1 torchvision==0.10.0+rocm4.1 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html - -# ROCM 4.0.1 (Linux only) -pip install torch==1.9.0+rocm4.0.1 torchvision==0.10.0+rocm4.0.1 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 11.1 -pip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 10.2 -pip install torch==1.9.0+cu102 torchvision==0.10.0+cu102 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html - -# CPU only -pip install torch==1.9.0+cpu torchvision==0.10.0+cpu torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html -``` - -### v1.8.2 with LTS support - -#### Conda - -##### OSX - -macOS is currently not supported for LTS. - -##### Linux and Windows - -``` -# CUDA 10.2 -# NOTE: PyTorch LTS version 1.8.2 is only supported for Python <= 3.8. -conda install pytorch torchvision torchaudio cudatoolkit=10.2 -c pytorch-lts - -# CUDA 11.1 (Linux) -# NOTE: 'nvidia' channel is required for cudatoolkit 11.1
NOTE: Pytorch LTS version 1.8.2 is only supported for Python <= 3.8. -conda install pytorch torchvision torchaudio cudatoolkit=11.1 -c pytorch-lts -c nvidia - -# CUDA 11.1 (Windows) -# 'conda-forge' channel is required for cudatoolkit 11.1
NOTE: Pytorch LTS version 1.8.2 is only supported for Python <= 3.8. -conda install pytorch torchvision torchaudio cudatoolkit=11.1 -c pytorch-lts -c conda-forge - -# CPU Only -# Pytorch LTS version 1.8.2 is only supported for Python <= 3.8. -conda install pytorch torchvision torchaudio cpuonly -c pytorch-lts - -# ROCM5.x - -Not supported in LTS. -``` - -#### Wheel - -##### OSX - -macOS is currently not supported in LTS. - -##### Linux and Windows - -``` -# CUDA 10.2 -pip3 install torch==1.8.2 torchvision==0.9.2 torchaudio==0.8.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cu102 - -# CUDA 11.1 -pip3 install torch==1.8.2 torchvision==0.9.2 torchaudio==0.8.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cu111 - -# CPU Only -pip3 install torch==1.8.2 torchvision==0.9.2 torchaudio==0.8.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cpu - -# ROCM5.x - -Not supported in LTS. -``` - -### v1.8.1 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.8.1 torchvision==0.9.1 torchaudio==0.8.1 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 10.2 -conda install pytorch==1.8.1 torchvision==0.9.1 torchaudio==0.8.1 cudatoolkit=10.2 -c pytorch - -# CUDA 11.3 -conda install pytorch==1.8.1 torchvision==0.9.1 torchaudio==0.8.1 cudatoolkit=11.3 -c pytorch -c conda-forge - -# CPU Only -conda install pytorch==1.8.1 torchvision==0.9.1 torchaudio==0.8.1 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.8.1 torchvision==0.9.1 torchaudio==0.8.1 -``` - -##### Linux and Windows - -``` -# ROCM 4.0.1 (Linux only) -pip install torch==1.8.1+rocm4.0.1 torchvision==0.9.1+rocm4.0.1 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html - -# ROCM 3.10 (Linux only) -pip install torch==1.8.1+rocm3.10 torchvision==0.9.1+rocm3.10 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 11.1 -pip install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 10.2 -pip install torch==1.8.1+cu102 torchvision==0.9.1+cu102 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 10.1 -pip install torch==1.8.1+cu101 torchvision==0.9.1+cu101 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html - -# CPU only -pip install torch==1.8.1+cpu torchvision==0.9.1+cpu torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html -``` - - -### v1.8.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 10.2 -conda install pytorch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 cudatoolkit=10.2 -c pytorch - -# CUDA 11.1 -conda install pytorch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 cudatoolkit=11.1 -c pytorch -c conda-forge - -# CPU Only -conda install pytorch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 -``` - -##### Linux and Windows - -``` -# RocM 4.0.1 (Linux only) -pip install torch -f https://download.pytorch.org/whl/rocm4.0.1/torch_stable.html -pip install ninja -pip install 'git+https://github.com/pytorch/vision.git@v0.9.0' - -# CUDA 11.1 -pip install torch==1.8.0+cu111 torchvision==0.9.0+cu111 torchaudio==0.8.0 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 10.2 -pip install torch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 - -# CPU 
only -pip install torch==1.8.0+cpu torchvision==0.9.0+cpu torchaudio==0.8.0 -f https://download.pytorch.org/whl/torch_stable.html -``` - -### v1.7.1 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 9.2 -conda install pytorch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 cudatoolkit=9.2 -c pytorch - -# CUDA 10.1 -conda install pytorch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 cudatoolkit=10.1 -c pytorch - -# CUDA 10.2 -conda install pytorch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 cudatoolkit=10.2 -c pytorch - -# CUDA 11.0 -conda install pytorch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 cudatoolkit=11.0 -c pytorch - -# CPU Only -conda install pytorch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 -``` - -##### Linux and Windows - -``` -# CUDA 11.0 -pip install torch==1.7.1+cu110 torchvision==0.8.2+cu110 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 10.2 -pip install torch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 - -# CUDA 10.1 -pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 9.2 -pip install torch==1.7.1+cu92 torchvision==0.8.2+cu92 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html - -# CPU only -pip install torch==1.7.1+cpu torchvision==0.8.2+cpu torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html -``` - -### v1.7.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 9.2 -conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=9.2 -c pytorch - -# CUDA 10.1 -conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=10.1 -c pytorch - -# CUDA 10.2 -conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=10.2 -c pytorch - -# CUDA 11.0 -conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=11.0 -c pytorch - -# CPU Only -conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 -``` - -##### Linux and Windows - -``` -# CUDA 11.0 -pip install torch==1.7.0+cu110 torchvision==0.8.0+cu110 torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 10.2 -pip install torch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 - -# CUDA 10.1 -pip install torch==1.7.0+cu101 torchvision==0.8.0+cu101 torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 9.2 -pip install torch==1.7.0+cu92 torchvision==0.8.0+cu92 torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html - -# CPU only -pip install torch==1.7.0+cpu torchvision==0.8.0+cpu torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html -``` - -### v1.6.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.6.0 torchvision==0.7.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 9.2 -conda install pytorch==1.6.0 torchvision==0.7.0 cudatoolkit=9.2 -c pytorch - -# CUDA 10.1 -conda install pytorch==1.6.0 torchvision==0.7.0 cudatoolkit=10.1 -c pytorch - -# CUDA 10.2 -conda install pytorch==1.6.0 torchvision==0.7.0 cudatoolkit=10.2 -c 
pytorch - -# CPU Only -conda install pytorch==1.6.0 torchvision==0.7.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.6.0 torchvision==0.7.0 -``` - -##### Linux and Windows - -``` -# CUDA 10.2 -pip install torch==1.6.0 torchvision==0.7.0 - -# CUDA 10.1 -pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 9.2 -pip install torch==1.6.0+cu92 torchvision==0.7.0+cu92 -f https://download.pytorch.org/whl/torch_stable.html - -# CPU only -pip install torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html -``` - -### v1.5.1 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.5.1 torchvision==0.6.1 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 9.2 -conda install pytorch==1.5.1 torchvision==0.6.1 cudatoolkit=9.2 -c pytorch - -# CUDA 10.1 -conda install pytorch==1.5.1 torchvision==0.6.1 cudatoolkit=10.1 -c pytorch - -# CUDA 10.2 -conda install pytorch==1.5.1 torchvision==0.6.1 cudatoolkit=10.2 -c pytorch - -# CPU Only -conda install pytorch==1.5.1 torchvision==0.6.1 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.5.1 torchvision==0.6.1 -``` - -##### Linux and Windows - -``` -# CUDA 10.2 -pip install torch==1.5.1 torchvision==0.6.1 - -# CUDA 10.1 -pip install torch==1.5.1+cu101 torchvision==0.6.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 9.2 -pip install torch==1.5.1+cu92 torchvision==0.6.1+cu92 -f https://download.pytorch.org/whl/torch_stable.html - -# CPU only -pip install torch==1.5.1+cpu torchvision==0.6.1+cpu -f https://download.pytorch.org/whl/torch_stable.html -``` - -### v1.5.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.5.0 torchvision==0.6.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 9.2 -conda install pytorch==1.5.0 torchvision==0.6.0 cudatoolkit=9.2 -c pytorch - -# CUDA 10.1 -conda install pytorch==1.5.0 torchvision==0.6.0 cudatoolkit=10.1 -c pytorch - -# CUDA 10.2 -conda install pytorch==1.5.0 torchvision==0.6.0 cudatoolkit=10.2 -c pytorch - -# CPU Only -conda install pytorch==1.5.0 torchvision==0.6.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.5.0 torchvision==0.6.0 -``` - -##### Linux and Windows - -``` -# CUDA 10.2 -pip install torch==1.5.0 torchvision==0.6.0 - -# CUDA 10.1 -pip install torch==1.5.0+cu101 torchvision==0.6.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html - -# CUDA 9.2 -pip install torch==1.5.0+cu92 torchvision==0.6.0+cu92 -f https://download.pytorch.org/whl/torch_stable.html - -# CPU only -pip install torch==1.5.0+cpu torchvision==0.6.0+cpu -f https://download.pytorch.org/whl/torch_stable.html -``` - -### v1.4.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.4.0 torchvision==0.5.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 9.2 -conda install pytorch==1.4.0 torchvision==0.5.0 cudatoolkit=9.2 -c pytorch - -# CUDA 10.1 -conda install pytorch==1.4.0 torchvision==0.5.0 cudatoolkit=10.1 -c pytorch - -# CPU Only -conda install pytorch==1.4.0 torchvision==0.5.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.4.0 torchvision==0.5.0 -``` - -##### Linux and Windows - -``` -# CUDA 10.1 -pip install torch==1.4.0 torchvision==0.5.0 - -# CUDA 9.2 -pip install torch==1.4.0+cu92 torchvision==0.5.0+cu92 -f https://download.pytorch.org/whl/torch_stable.html - -# CPU only -pip install torch==1.4.0+cpu 
torchvision==0.5.0+cpu -f https://download.pytorch.org/whl/torch_stable.html -``` - -### v1.2.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.2.0 torchvision==0.4.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 9.2 -conda install pytorch==1.2.0 torchvision==0.4.0 cudatoolkit=9.2 -c pytorch - -# CUDA 10.0 -conda install pytorch==1.2.0 torchvision==0.4.0 cudatoolkit=10.0 -c pytorch - -# CPU Only -conda install pytorch==1.2.0 torchvision==0.4.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.2.0 torchvision==0.4.0 -``` - -##### Linux and Windows - -``` -# CUDA 10.0 -pip install torch==1.2.0 torchvision==0.4.0 - -# CUDA 9.2 -pip install torch==1.2.0+cu92 torchvision==0.4.0+cu92 -f https://download.pytorch.org/whl/torch_stable.html - -# CPU only -pip install torch==1.2.0+cpu torchvision==0.4.0+cpu -f https://download.pytorch.org/whl/torch_stable.html -``` - -### v1.1.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.1.0 torchvision==0.3.0 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 9.0 -conda install pytorch==1.1.0 torchvision==0.3.0 cudatoolkit=9.0 -c pytorch - -# CUDA 10.0 -conda install pytorch==1.1.0 torchvision==0.3.0 cudatoolkit=10.0 -c pytorch - -# CPU Only -conda install pytorch-cpu==1.1.0 torchvision-cpu==0.3.0 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.1.0 torchvision==0.3.0 -``` - -##### Linux and Windows - -``` -# CUDA 10.0 -Download and install wheel from https://download.pytorch.org/whl/cu100/torch_stable.html - -# CUDA 9.0 -Download and install wheel from https://download.pytorch.org/whl/cu90/torch_stable.html - -# CPU only -Download and install wheel from https://download.pytorch.org/whl/cpu/torch_stable.html -``` - -### v1.0.1 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.0.1 torchvision==0.2.2 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 9.0 -conda install pytorch==1.0.1 torchvision==0.2.2 cudatoolkit=9.0 -c pytorch - -# CUDA 10.0 -conda install pytorch==1.0.1 torchvision==0.2.2 cudatoolkit=10.0 -c pytorch - -# CPU Only -conda install pytorch-cpu==1.0.1 torchvision-cpu==0.2.2 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.0.1 torchvision==0.2.2 -``` - -##### Linux and Windows - -``` -# CUDA 10.0 -Download and install wheel from https://download.pytorch.org/whl/cu100/torch_stable.html - -# CUDA 9.0 -Download and install wheel from https://download.pytorch.org/whl/cu90/torch_stable.html - -# CPU only -Download and install wheel from https://download.pytorch.org/whl/cpu/torch_stable.html -``` - -### v1.0.0 - -#### Conda - -##### OSX - -``` -# conda -conda install pytorch==1.0.0 torchvision==0.2.1 -c pytorch -``` - -##### Linux and Windows - -``` -# CUDA 10.0 -conda install pytorch==1.0.0 torchvision==0.2.1 cuda100 -c pytorch - -# CUDA 9.0 -conda install pytorch==1.0.0 torchvision==0.2.1 cuda90 -c pytorch - -# CUDA 8.0 -conda install pytorch==1.0.0 torchvision==0.2.1 cuda80 -c pytorch - -# CPU Only -conda install pytorch-cpu==1.0.0 torchvision-cpu==0.2.1 cpuonly -c pytorch -``` - -#### Wheel - -##### OSX - -``` -pip install torch==1.0.0 torchvision==0.2.1 -``` - -##### Linux and Windows - -``` -# CUDA 10.0 -Download and install wheel from https://download.pytorch.org/whl/cu100/torch_stable.html - -# CUDA 9.0 -Download and install wheel from https://download.pytorch.org/whl/cu90/torch_stable.html - -# CUDA 8.0 -Download and install wheel from 
https://download.pytorch.org/whl/cu80/torch_stable.html - -# CPU only -Download and install wheel from https://download.pytorch.org/whl/cpu/torch_stable.html -``` - -## Commands for Versions < 1.0.0 - -### Via conda - -> This should be used for most previous macOS version installs. - -To install a previous version of PyTorch via Anaconda or Miniconda, -replace "0.4.1" in the following commands with the desired version -(i.e., "0.2.0"). - -Installing with CUDA 9 - -`conda install pytorch=0.4.1 cuda90 -c pytorch` - -or - -`conda install pytorch=0.4.1 cuda92 -c pytorch` - -Installing with CUDA 8 - -`conda install pytorch=0.4.1 cuda80 -c pytorch` - -Installing with CUDA 7.5 - -`conda install pytorch=0.4.1 cuda75 -c pytorch` - -Installing without CUDA - -`conda install pytorch=0.4.1 -c pytorch` - -### From source - -It is possible to checkout an older version of [PyTorch](https://github.com/pytorch/pytorch) -and build it. -You can list tags in PyTorch git repository with `git tag` and checkout a -particular one (replace '0.1.9' with the desired version) with - -`git checkout v0.1.9` - -Follow the install from source instructions in the README.md of the PyTorch -checkout. - -### Via pip - -Download the `whl` file with the desired version from the following html pages: - -- # CPU-only build -- # CUDA 8.0 build -- # CUDA 9.0 build -- # CUDA 9.2 build -- # CUDA 10.0 build - -Then, install the file with `pip install [downloaded file]` - - -Note: most pytorch versions are available only for specific CUDA versions. For example pytorch=1.0.1 is not available for CUDA 9.2 - -### (Old) PyTorch Linux binaries compiled with CUDA 7.5 - -These predate the html page above and have to be manually installed by downloading the wheel file and `pip install downloaded_file` - -- [cu75/torch-0.3.0.post4-cp36-cp36m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.3.0.post4-cp36-cp36m-linux_x86_64.whl) -- [cu75/torch-0.3.0.post4-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.3.0.post4-cp35-cp35m-linux_x86_64.whl) -- [cu75/torch-0.3.0.post4-cp27-cp27mu-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.3.0.post4-cp27-cp27mu-linux_x86_64.whl) -- [cu75/torch-0.3.0.post4-cp27-cp27m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.3.0.post4-cp27-cp27m-linux_x86_64.whl) -- [cu75/torch-0.2.0.post3-cp36-cp36m-manylinux1_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.2.0.post3-cp36-cp36m-manylinux1_x86_64.whl) -- [cu75/torch-0.2.0.post3-cp35-cp35m-manylinux1_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.2.0.post3-cp35-cp35m-manylinux1_x86_64.whl) -- [cu75/torch-0.2.0.post3-cp27-cp27mu-manylinux1_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.2.0.post3-cp27-cp27mu-manylinux1_x86_64.whl) -- [cu75/torch-0.2.0.post3-cp27-cp27m-manylinux1_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.2.0.post3-cp27-cp27m-manylinux1_x86_64.whl) -- [cu75/torch-0.2.0.post2-cp36-cp36m-manylinux1_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.2.0.post2-cp36-cp36m-manylinux1_x86_64.whl) -- [cu75/torch-0.2.0.post2-cp35-cp35m-manylinux1_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.2.0.post2-cp35-cp35m-manylinux1_x86_64.whl) -- [cu75/torch-0.2.0.post2-cp27-cp27mu-manylinux1_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.2.0.post2-cp27-cp27mu-manylinux1_x86_64.whl) -- 
[cu75/torch-0.2.0.post2-cp27-cp27m-manylinux1_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.2.0.post2-cp27-cp27m-manylinux1_x86_64.whl) -- [cu75/torch-0.2.0.post1-cp36-cp36m-manylinux1_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.2.0.post1-cp36-cp36m-manylinux1_x86_64.whl) -- [cu75/torch-0.2.0.post1-cp35-cp35m-manylinux1_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.2.0.post1-cp35-cp35m-manylinux1_x86_64.whl) -- [cu75/torch-0.2.0.post1-cp27-cp27mu-manylinux1_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.2.0.post1-cp27-cp27mu-manylinux1_x86_64.whl) -- [cu75/torch-0.2.0.post1-cp27-cp27m-manylinux1_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.2.0.post1-cp27-cp27m-manylinux1_x86_64.whl) -- [cu75/torch-0.1.12.post2-cp36-cp36m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.12.post2-cp36-cp36m-linux_x86_64.whl) -- [cu75/torch-0.1.12.post2-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.12.post2-cp35-cp35m-linux_x86_64.whl) -- [cu75/torch-0.1.12.post2-cp27-none-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.12.post2-cp27-none-linux_x86_64.whl) -- [cu75/torch-0.1.12.post1-cp36-cp36m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.12.post1-cp36-cp36m-linux_x86_64.whl) -- [cu75/torch-0.1.12.post1-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.12.post1-cp35-cp35m-linux_x86_64.whl) -- [cu75/torch-0.1.12.post1-cp27-none-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.12.post1-cp27-none-linux_x86_64.whl) -- [cu75/torch-0.1.11.post5-cp36-cp36m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.11.post5-cp36-cp36m-linux_x86_64.whl) -- [cu75/torch-0.1.11.post5-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.11.post5-cp35-cp35m-linux_x86_64.whl) -- [cu75/torch-0.1.11.post5-cp27-none-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.11.post5-cp27-none-linux_x86_64.whl) -- [cu75/torch-0.1.11.post4-cp36-cp36m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.11.post4-cp36-cp36m-linux_x86_64.whl) -- [cu75/torch-0.1.11.post4-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.11.post4-cp35-cp35m-linux_x86_64.whl) -- [cu75/torch-0.1.11.post4-cp27-none-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.11.post4-cp27-none-linux_x86_64.whl) -- [cu75/torch-0.1.10.post2-cp36-cp36m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.10.post2-cp36-cp36m-linux_x86_64.whl) -- [cu75/torch-0.1.10.post2-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.10.post2-cp35-cp35m-linux_x86_64.whl) -- [cu75/torch-0.1.10.post2-cp27-none-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.10.post2-cp27-none-linux_x86_64.whl) -- [cu75/torch-0.1.10.post1-cp36-cp36m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.10.post1-cp36-cp36m-linux_x86_64.whl) -- [cu75/torch-0.1.10.post1-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.10.post1-cp35-cp35m-linux_x86_64.whl) -- [cu75/torch-0.1.10.post1-cp27-none-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.10.post1-cp27-none-linux_x86_64.whl) -- [cu75/torch-0.1.9.post2-cp36-cp36m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.9.post2-cp36-cp36m-linux_x86_64.whl) -- 
[cu75/torch-0.1.9.post2-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.9.post2-cp35-cp35m-linux_x86_64.whl) -- [cu75/torch-0.1.9.post2-cp27-none-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.9.post2-cp27-none-linux_x86_64.whl) -- [cu75/torch-0.1.9.post1-cp36-cp36m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.9.post1-cp36-cp36m-linux_x86_64.whl) -- [cu75/torch-0.1.9.post1-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.9.post1-cp35-cp35m-linux_x86_64.whl) -- [cu75/torch-0.1.9.post1-cp27-none-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.9.post1-cp27-none-linux_x86_64.whl) -- [cu75/torch-0.1.8.post1-cp36-cp36m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.8.post1-cp36-cp36m-linux_x86_64.whl) -- [cu75/torch-0.1.8.post1-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.8.post1-cp35-cp35m-linux_x86_64.whl) -- [cu75/torch-0.1.8.post1-cp27-none-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.8.post1-cp27-none-linux_x86_64.whl) -- [cu75/torch-0.1.7.post2-cp36-cp36m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.7.post2-cp36-cp36m-linux_x86_64.whl) -- [cu75/torch-0.1.7.post2-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.7.post2-cp35-cp35m-linux_x86_64.whl) -- [cu75/torch-0.1.7.post2-cp27-none-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.7.post2-cp27-none-linux_x86_64.whl) -- [cu75/torch-0.1.6.post22-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.6.post22-cp35-cp35m-linux_x86_64.whl) -- [cu75/torch-0.1.6.post22-cp27-none-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.6.post22-cp27-none-linux_x86_64.whl) -- [cu75/torch-0.1.6.post20-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.6.post20-cp35-cp35m-linux_x86_64.whl) -- [cu75/torch-0.1.6.post20-cp27-cp27mu-linux_x86_64.whl](https://download.pytorch.org/whl/cu75/torch-0.1.6.post20-cp27-cp27mu-linux_x86_64.whl) - -### Windows binaries - -- [cpu/torch-1.0.0-cp35-cp35m-win_amd64.whl](https://download.pytorch.org/whl/cpu/torch-1.0.0-cp35-cp35m-win_amd64.whl) -- [cu80/torch-1.0.0-cp35-cp35m-win_amd64.whl](https://download.pytorch.org/whl/cu80/torch-1.0.0-cp35-cp35m-win_amd64.whl) -- [cu90/torch-1.0.0-cp35-cp35m-win_amd64.whl](https://download.pytorch.org/whl/cu90/torch-1.0.0-cp35-cp35m-win_amd64.whl) -- [cu100/torch-1.0.0-cp35-cp35m-win_amd64.whl](https://download.pytorch.org/whl/cu100/torch-1.0.0-cp35-cp35m-win_amd64.whl) -- [cpu/torch-1.0.0-cp36-cp36m-win_amd64.whl](https://download.pytorch.org/whl/cpu/torch-1.0.0-cp36-cp36m-win_amd64.whl) -- [cu80/torch-1.0.0-cp36-cp36m-win_amd64.whl](https://download.pytorch.org/whl/cu80/torch-1.0.0-cp36-cp36m-win_amd64.whl) -- [cu90/torch-1.0.0-cp36-cp36m-win_amd64.whl](https://download.pytorch.org/whl/cu90/torch-1.0.0-cp36-cp36m-win_amd64.whl) -- [cu100/torch-1.0.0-cp36-cp36m-win_amd64.whl](https://download.pytorch.org/whl/cu100/torch-1.0.0-cp36-cp36m-win_amd64.whl) -- [cpu/torch-1.0.0-cp37-cp37m-win_amd64.whl](https://download.pytorch.org/whl/cpu/torch-1.0.0-cp37-cp37m-win_amd64.whl) -- [cu80/torch-1.0.0-cp37-cp37m-win_amd64.whl](https://download.pytorch.org/whl/cu80/torch-1.0.0-cp37-cp37m-win_amd64.whl) -- [cu90/torch-1.0.0-cp37-cp37m-win_amd64.whl](https://download.pytorch.org/whl/cu90/torch-1.0.0-cp37-cp37m-win_amd64.whl) -- 
[cu100/torch-1.0.0-cp37-cp37m-win_amd64.whl](https://download.pytorch.org/whl/cu100/torch-1.0.0-cp37-cp37m-win_amd64.whl) -- [cpu/torch-0.4.1-cp35-cp35m-win_amd64.whl](https://download.pytorch.org/whl/cpu/torch-0.4.1-cp35-cp35m-win_amd64.whl) -- [cu80/torch-0.4.1-cp35-cp35m-win_amd64.whl](https://download.pytorch.org/whl/cu80/torch-0.4.1-cp35-cp35m-win_amd64.whl) -- [cu90/torch-0.4.1-cp35-cp35m-win_amd64.whl](https://download.pytorch.org/whl/cu90/torch-0.4.1-cp35-cp35m-win_amd64.whl) -- [cu92/torch-0.4.1-cp35-cp35m-win_amd64.whl](https://download.pytorch.org/whl/cu92/torch-0.4.1-cp35-cp35m-win_amd64.whl) -- [cpu/torch-0.4.1-cp36-cp36m-win_amd64.whl](https://download.pytorch.org/whl/cpu/torch-0.4.1-cp36-cp36m-win_amd64.whl) -- [cu80/torch-0.4.1-cp36-cp36m-win_amd64.whl](https://download.pytorch.org/whl/cu80/torch-0.4.1-cp36-cp36m-win_amd64.whl) -- [cu90/torch-0.4.1-cp36-cp36m-win_amd64.whl](https://download.pytorch.org/whl/cu90/torch-0.4.1-cp36-cp36m-win_amd64.whl) -- [cu92/torch-0.4.1-cp36-cp36m-win_amd64.whl](https://download.pytorch.org/whl/cu92/torch-0.4.1-cp36-cp36m-win_amd64.whl) -- [cpu/torch-0.4.1-cp37-cp37m-win_amd64.whl](https://download.pytorch.org/whl/cpu/torch-0.4.1-cp37-cp37m-win_amd64.whl) -- [cu80/torch-0.4.1-cp37-cp37m-win_amd64.whl](https://download.pytorch.org/whl/cu80/torch-0.4.1-cp37-cp37m-win_amd64.whl) -- [cu90/torch-0.4.1-cp37-cp37m-win_amd64.whl](https://download.pytorch.org/whl/cu90/torch-0.4.1-cp37-cp37m-win_amd64.whl) -- [cu92/torch-0.4.1-cp37-cp37m-win_amd64.whl](https://download.pytorch.org/whl/cu92/torch-0.4.1-cp37-cp37m-win_amd64.whl) - -### Mac and misc. binaries - -For recent macOS binaries, use `conda`: - -e.g., - -`conda install pytorch=0.4.1 cuda90 -c pytorch` -`conda install pytorch=0.4.1 cuda92 -c pytorch` -`conda install pytorch=0.4.1 cuda80 -c pytorch` -`conda install pytorch=0.4.1 -c pytorch` # No CUDA - -- [torchvision-0.1.6-py3-none-any.whl](https://download.pytorch.org/whl/torchvision-0.1.6-py3-none-any.whl) -- [torchvision-0.1.6-py2-none-any.whl](https://download.pytorch.org/whl/torchvision-0.1.6-py2-none-any.whl) -- [torch-1.0.0-cp37-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/cpu/torch-1.0.0-cp37-none-macosx_10_7_x86_64.whl) -- [torch-1.0.0-cp36-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/cpu/torch-1.0.0-cp36-none-macosx_10_7_x86_64.whl) -- [torch-1.0.0-cp35-none-macosx_10_6_x86_64.whl](https://download.pytorch.org/whl/cpu/torch-1.0.0-cp35-none-macosx_10_6_x86_64.whl) -- [torch-1.0.0-cp27-none-macosx_10_6_x86_64.whl](https://download.pytorch.org/whl/cpu/torch-1.0.0-cp27-none-macosx_10_6_x86_64.whl) -- [torch-0.4.0-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.4.0-cp36-cp36m-macosx_10_7_x86_64.whl) -- [torch-0.4.0-cp35-cp35m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.4.0-cp35-cp35m-macosx_10_6_x86_64.whl) -- [torch-0.4.0-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.4.0-cp27-none-macosx_10_6_x86_64.whl) -- [torch-0.3.1-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.3.1-cp36-cp36m-macosx_10_7_x86_64.whl) -- [torch-0.3.1-cp35-cp35m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.3.1-cp35-cp35m-macosx_10_6_x86_64.whl) -- [torch-0.3.1-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.3.1-cp27-none-macosx_10_6_x86_64.whl) -- [torch-0.3.0.post4-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.3.0.post4-cp36-cp36m-macosx_10_7_x86_64.whl) -- 
[torch-0.3.0.post4-cp35-cp35m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.3.0.post4-cp35-cp35m-macosx_10_6_x86_64.whl) -- [torch-0.3.0.post4-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.3.0.post4-cp27-none-macosx_10_6_x86_64.whl) -- [torch-0.2.0.post3-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.2.0.post3-cp36-cp36m-macosx_10_7_x86_64.whl) -- [torch-0.2.0.post3-cp35-cp35m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.2.0.post3-cp35-cp35m-macosx_10_7_x86_64.whl) -- [torch-0.2.0.post3-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.2.0.post3-cp27-none-macosx_10_7_x86_64.whl) -- [torch-0.2.0.post2-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.2.0.post2-cp36-cp36m-macosx_10_7_x86_64.whl) -- [torch-0.2.0.post2-cp35-cp35m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.2.0.post2-cp35-cp35m-macosx_10_7_x86_64.whl) -- [torch-0.2.0.post2-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.2.0.post2-cp27-none-macosx_10_7_x86_64.whl) -- [torch-0.2.0.post1-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.2.0.post1-cp36-cp36m-macosx_10_7_x86_64.whl) -- [torch-0.2.0.post1-cp35-cp35m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.2.0.post1-cp35-cp35m-macosx_10_7_x86_64.whl) -- [torch-0.2.0.post1-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.2.0.post1-cp27-none-macosx_10_7_x86_64.whl) -- [torch-0.1.12.post2-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.12.post2-cp36-cp36m-macosx_10_7_x86_64.whl) -- [torch-0.1.12.post2-cp35-cp35m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.12.post2-cp35-cp35m-macosx_10_7_x86_64.whl) -- [torch-0.1.12.post2-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.12.post2-cp27-none-macosx_10_7_x86_64.whl) -- [torch-0.1.12.post1-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.12.post1-cp36-cp36m-macosx_10_7_x86_64.whl) -- [torch-0.1.12.post1-cp35-cp35m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.12.post1-cp35-cp35m-macosx_10_7_x86_64.whl) -- [torch-0.1.12.post1-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.12.post1-cp27-none-macosx_10_7_x86_64.whl) -- [torch-0.1.11.post5-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.11.post5-cp36-cp36m-macosx_10_7_x86_64.whl) -- [torch-0.1.11.post5-cp35-cp35m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.11.post5-cp35-cp35m-macosx_10_7_x86_64.whl) -- [torch-0.1.11.post5-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.11.post5-cp27-none-macosx_10_7_x86_64.whl) -- [torch-0.1.11.post4-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.11.post4-cp36-cp36m-macosx_10_7_x86_64.whl) -- [torch-0.1.11.post4-cp35-cp35m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.11.post4-cp35-cp35m-macosx_10_7_x86_64.whl) -- [torch-0.1.11.post4-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.11.post4-cp27-none-macosx_10_7_x86_64.whl) -- [torch-0.1.10.post1-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.10.post1-cp36-cp36m-macosx_10_7_x86_64.whl) -- 
[torch-0.1.10.post1-cp35-cp35m-macosx_10_6_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.10.post1-cp35-cp35m-macosx_10_6_x86_64.whl) -- [torch-0.1.10.post1-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.10.post1-cp27-none-macosx_10_7_x86_64.whl) -- [torch-0.1.9.post2-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.9.post2-cp36-cp36m-macosx_10_7_x86_64.whl) -- [torch-0.1.9.post2-cp35-cp35m-macosx_10_6_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.9.post2-cp35-cp35m-macosx_10_6_x86_64.whl) -- [torch-0.1.9.post2-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.9.post2-cp27-none-macosx_10_7_x86_64.whl) -- [torch-0.1.9.post1-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.9.post1-cp36-cp36m-macosx_10_7_x86_64.whl) -- [torch-0.1.9.post1-cp35-cp35m-macosx_10_6_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.9.post1-cp35-cp35m-macosx_10_6_x86_64.whl) -- [torch-0.1.9.post1-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.9.post1-cp27-none-macosx_10_7_x86_64.whl) -- [torch-0.1.8.post1-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.8.post1-cp36-cp36m-macosx_10_7_x86_64.whl) -- [torch-0.1.8.post1-cp35-cp35m-macosx_10_6_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.8.post1-cp35-cp35m-macosx_10_6_x86_64.whl) -- [torch-0.1.8.post1-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.8.post1-cp27-none-macosx_10_7_x86_64.whl) -- [torch-0.1.7.post2-cp36-cp36m-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.7.post2-cp36-cp36m-macosx_10_7_x86_64.whl) -- [torch-0.1.7.post2-cp35-cp35m-macosx_10_6_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.7.post2-cp35-cp35m-macosx_10_6_x86_64.whl) -- [torch-0.1.7.post2-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.7.post2-cp27-none-macosx_10_7_x86_64.whl) -- [torch-0.1.6.post22-cp35-cp35m-macosx_10_6_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.6.post22-cp35-cp35m-macosx_10_6_x86_64.whl) -- [torch-0.1.6.post22-cp27-none-macosx_10_7_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.6.post22-cp27-none-macosx_10_7_x86_64.whl) -- [torch-0.1.6.post20-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.6.post20-cp35-cp35m-linux_x86_64.whl) -- [torch-0.1.6.post20-cp27-cp27mu-linux_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.6.post20-cp27-cp27mu-linux_x86_64.whl) -- [torch-0.1.6.post17-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.6.post17-cp35-cp35m-linux_x86_64.whl) -- [torch-0.1.6.post17-cp27-cp27mu-linux_x86_64.whl](https://download.pytorch.org/whl/torch-0.1.6.post17-cp27-cp27mu-linux_x86_64.whl) -- [torch-0.1-cp35-cp35m-macosx_10_6_x86_64.whl](https://download.pytorch.org/whl/torch-0.1-cp35-cp35m-macosx_10_6_x86_64.whl) -- [torch-0.1-cp27-cp27m-macosx_10_6_x86_64.whl](https://download.pytorch.org/whl/torch-0.1-cp27-cp27m-macosx_10_6_x86_64.whl) -- [torch_cuda80-0.1.6.post20-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/torch_cuda80-0.1.6.post20-cp35-cp35m-linux_x86_64.whl) -- [torch_cuda80-0.1.6.post20-cp27-cp27mu-linux_x86_64.whl](https://download.pytorch.org/whl/torch_cuda80-0.1.6.post20-cp27-cp27mu-linux_x86_64.whl) -- [torch_cuda80-0.1.6.post17-cp35-cp35m-linux_x86_64.whl](https://download.pytorch.org/whl/torch_cuda80-0.1.6.post17-cp35-cp35m-linux_x86_64.whl) -- 
[torch_cuda80-0.1.6.post17-cp27-cp27mu-linux_x86_64.whl](https://download.pytorch.org/whl/torch_cuda80-0.1.6.post17-cp27-cp27mu-linux_x86_64.whl) - - - - - diff --git a/_get_started/pytorch.md b/_get_started/pytorch.md deleted file mode 100644 index 9ea724d6ddf1..000000000000 --- a/_get_started/pytorch.md +++ /dev/null @@ -1,668 +0,0 @@ ---- -layout: get_started -title: PyTorch 2.x -permalink: /get-started/pytorch-2.0/ -featured-img: "assets/images/featured-img-pytorch-2.png" -background-class: get-started-background -body-class: get-started -order: 2 -published: true ---- - -## Overview - -Introducing PyTorch 2.0, our first steps toward the next generation 2-series release of PyTorch. Over the last few years we have innovated and iterated from PyTorch 1.0 to the most recent 1.13 and moved to the newly formed PyTorch Foundation, part of the Linux Foundation. - -PyTorch’s biggest strength beyond our amazing community is that we continue as a first-class Python integration, imperative style, simplicity of the API and options. PyTorch 2.0 offers the same eager-mode development and user experience, while fundamentally changing and supercharging how PyTorch operates at compiler level under the hood. We are able to provide faster performance and support for Dynamic Shapes and Distributed. - -Below you will find all the information you need to better understand what PyTorch 2.0 is, where it’s going and more importantly how to get started today (e.g., tutorial, requirements, models, common FAQs). There is still a lot to learn and develop but we are looking forward to community feedback and contributions to make the 2-series better and thank you all who have made the 1-series so successful. - -## PyTorch 2.x: faster, more pythonic and as dynamic as ever - -Today, we announce `torch.compile`, a feature that pushes PyTorch performance to new heights and starts the move for parts of PyTorch from C++ back into Python. We believe that this is a substantial new direction for PyTorch -- hence we call it 2.0. `torch.compile` is a fully additive (and optional) feature and hence 2.0 is 100% backward compatible by definition. - -Underpinning `torch.compile` are new technologies -- TorchDynamo, AOTAutograd, PrimTorch and TorchInductor. - -- **TorchDynamo** captures PyTorch programs safely using Python Frame Evaluation Hooks and is a significant innovation that was a result of 5 years of our R&D into safe graph capture - -* **AOTAutograd** overloads PyTorch’s autograd engine as a tracing autodiff for generating ahead-of-time backward traces. - -- **PrimTorch** canonicalizes ~2000+ PyTorch operators down to a closed set of ~250 primitive operators that developers can target to build a complete PyTorch backend. This substantially lowers the barrier of writing a PyTorch feature or backend. -- **TorchInductor** is a deep learning compiler that generates fast code for multiple accelerators and backends. For NVIDIA and AMD GPUs, it uses OpenAI Triton as a key building block. - -TorchDynamo, AOTAutograd, PrimTorch and TorchInductor are written in Python and support dynamic shapes (i.e. the ability to send in Tensors of different sizes without inducing a recompilation), making them flexible, easily hackable and lowering the barrier of entry for developers and vendors. - -To validate these technologies, we used a diverse set of 163 open-source models across various machine learning domains. 
We built this benchmark carefully to include tasks such as Image Classification, Object Detection, Image Generation, various NLP tasks such as Language Modeling, Q&A, Sequence Classification, Recommender Systems and Reinforcement Learning. We separate the benchmarks into three categories:

- 46 models from HuggingFace Transformers
- 61 models from TIMM: a collection of state-of-the-art PyTorch image models by Ross Wightman
- 56 models from TorchBench: a curated set of popular code-bases from across github
- - - -We don’t modify these open-source models except to add a `torch.compile` call wrapping them. - -We then measure speedups and validate accuracy across these models. Since speedups can be dependent on data-type, we measure speedups on both float32 and Automatic Mixed Precision (AMP). We report an uneven weighted average speedup of _0.75 * AMP + 0.25 * float32_ since we find AMP is more common in practice. - -Across these 163 open-source models `torch.compile` works 93% of time, and the model runs 43% faster in training on an NVIDIA A100 GPU. At Float32 precision, it runs 21% faster on average and at AMP Precision it runs 51% faster on average. - -**Caveats:** On a desktop-class GPU such as a NVIDIA 3090, we’ve measured that speedups are lower than on server-class GPUs such as A100. As of today, our default backend TorchInductor supports CPUs and NVIDIA Volta and Ampere GPUs. It does not (yet) support other GPUs, xPUs or older NVIDIA GPUs. - -
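The recipe behind these numbers is simply to wrap each model in `torch.compile` and time it against eager mode. A minimal sketch of the idea (a hypothetical harness with a placeholder model and input, not the actual benchmark code):

```python
import time
import torch
import torchvision.models as models

model = models.resnet50().cuda()
compiled_model = torch.compile(model)  # the only change made to each benchmarked model
x = torch.randn(32, 3, 224, 224, device="cuda")

def bench(fn, iters=20):
    for _ in range(3):  # warm-up; the first compiled call triggers compilation
        fn(x)
    torch.cuda.synchronize()
    start = time.time()
    for _ in range(iters):
        fn(x)
    torch.cuda.synchronize()
    return (time.time() - start) / iters

print(f"speedup: {bench(model) / bench(compiled_model):.2f}x")
```

The same harness can be run under `torch.autocast` to obtain the AMP numbers used in the weighted average above.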
_Figure: Speedups for torch.compile against eager mode on an NVIDIA A100 GPU_
- -**Try it:** `torch.compile` is in the early stages of development. Starting today, you can try out `torch.compile` in the `nightly` binaries. We expect to ship the first stable 2.0 release in early March 2023. - -In the roadmap of PyTorch 2.x we hope to push the compiled mode further and further in terms of performance and scalability. Some of this work is in-flight, as we talked about at the Conference today. Some of this work has not started yet. Some of this work is what we hope to see, but don’t have the bandwidth to do ourselves. If you are interested in contributing, come chat with us at the **Ask the Engineers: 2.0 Live Q&A Series** starting this month (details at the end of this post) and/or via Github / Forums. - -
- -### Testimonials - -Here is what some of PyTorch’s users have to say about our new direction: - -**Sylvain Gugger** the **primary maintainer of HuggingFace transformers**: - -_"With just one line of code to add, PyTorch 2.0 gives a speedup between 1.5x and 2.x in training Transformers models. This is the most exciting thing since mixed precision training was introduced!"_ - -**Ross Wightman the primary maintainer of TIMM** (one of the largest vision model hubs within the PyTorch ecosystem): - -_“It just works out of the box with majority of TIMM models for inference and train workloads with no code changes”_ - -**Luca Antiga** the **CTO of Lightning AI** and one of the **primary maintainers of PyTorch Lightning** - -_“PyTorch 2.0 embodies the future of deep learning frameworks. The possibility to capture a PyTorch program with effectively no user intervention and get massive on-device speedups and program manipulation out of the box unlocks a whole new dimension for AI developers.”_ - -## Motivation - -Our philosophy on PyTorch has always been to keep flexibility and hackability our top priority, and performance as a close second. We strived for: - -1. High-Performance eager execution -2. Pythonic internals -3. Good abstractions for Distributed, Autodiff, Data loading, Accelerators, etc. - -Since we launched PyTorch in 2017, hardware accelerators (such as GPUs) have become ~15x faster in compute and about ~2x faster in the speed of memory access. So, to keep eager execution at high-performance, we've had to move substantial parts of PyTorch internals into C++. Moving internals into C++ makes them less hackable and increases the barrier of entry for code contributions. - -From day one, we knew the performance limits of eager execution. In July 2017, we started our first research project into developing a Compiler for PyTorch. The compiler needed to make a PyTorch program fast, but not at the cost of the PyTorch experience. Our key criteria was to preserve certain kinds of flexibility -- support for dynamic shapes and dynamic programs which researchers use in various stages of exploration. - -
- -## Technology Overview - -Over the years, we've built several compiler projects within PyTorch. Let us break down the compiler into three parts: - -- graph acquisition -- graph lowering -- graph compilation - -Graph acquisition was the harder challenge when building a PyTorch compiler. - -In the past 5 years, we built `torch.jit.trace`, TorchScript, FX tracing, Lazy Tensors. But none of them felt like they gave us everything we wanted. Some were flexible but not fast, some were fast but not flexible and some were neither fast nor flexible. Some had bad user-experience (like being silently wrong). While TorchScript was promising, it needed substantial changes to your code and the code that your code depended on. This need for substantial change in code made it a non-starter for a lot of PyTorch users. - -
_Figure: The PyTorch compilation process_
- -### TorchDynamo: Acquiring Graphs reliably and fast - -Earlier this year, we started working on TorchDynamo, an approach that uses a CPython feature introduced in [PEP-0523](https://peps.python.org/pep-0523/) called the Frame Evaluation API. We took a data-driven approach to validate its effectiveness on Graph Capture. We used 7,000+ Github projects written in PyTorch as our validation set. While TorchScript and others struggled to even acquire the graph 50% of the time, often with a big overhead, TorchDynamo acquired the graph [99% of the time](https://dev-discuss.pytorch.org/t/torchdynamo-update-8-torchdynamo-passed-correctness-check-on-7k-github-models/663), correctly, safely and with negligible overhead – without needing any changes to the original code. This is when we knew that we finally broke through the barrier that we were struggling with for many years in terms of flexibility and speed. - -### TorchInductor: fast codegen using a define-by-run IR - -For a new compiler backend for PyTorch 2.0, we took inspiration from how our users were writing high performance custom kernels: increasingly using the [Triton](https://github.com/openai/triton) language. We also wanted a compiler backend that used similar abstractions to PyTorch eager, and was general purpose enough to support the wide breadth of features in PyTorch. TorchInductor uses a pythonic define-by-run loop level IR to automatically map PyTorch models into generated Triton code on GPUs and C++/OpenMP on CPUs. TorchInductor’s core loop level IR contains only ~50 operators, and it is implemented in Python, making it easily hackable and extensible. - -### AOTAutograd: reusing Autograd for ahead-of-time graphs - -For PyTorch 2.0, we knew that we wanted to accelerate training. Thus, it was critical that we not only captured user-level code, but also that we captured backpropagation. Moreover, we knew that we wanted to reuse the existing battle-tested PyTorch autograd system. AOTAutograd leverages PyTorch’s **torch_dispatch** extensibility mechanism to trace through our Autograd engine, allowing us to capture the backwards pass “ahead-of-time”. This allows us to accelerate both our forwards _and_ backwards pass using TorchInductor. - -### PrimTorch: Stable Primitive operators - -Writing a backend for PyTorch is challenging. PyTorch has 1200+ operators, and 2000+ if you consider various overloads for each operator. - -
_Figure: A breakdown of the 2000+ PyTorch operators_
- -Hence, writing a backend or a cross-cutting feature becomes a draining endeavor. Within the PrimTorch project, we are working on defining smaller and stable operator sets. PyTorch programs can consistently be lowered to these operator sets. We aim to define two operator sets: - -- Prim ops with about ~250 operators, which are fairly low-level. These are suited for compilers because they are low-level enough that you need to fuse them back together to get good performance. -- ATen ops with about ~750 canonical operators and suited for exporting as-is. These are suited for backends that already integrate at the ATen level or backends that won't have compilation to recover performance from a lower-level operator set like Prim ops. - -We discuss more about this topic below in the Developer/Vendor Experience section - -## User Experience - -We introduce a simple function `torch.compile` that wraps your model and returns a compiled model. - -```python -compiled_model = torch.compile(model) -``` - -This `compiled_model` holds a reference to your model and compiles the `forward` function to a more optimized version. When compiling the model, we give a few knobs to adjust it: - -```python -def torch.compile(model: Callable, - *, - mode: Optional[str] = "default", - dynamic: bool = False, - fullgraph:bool = False, - backend: Union[str, Callable] = "inductor", - # advanced backend options go here as kwargs - **kwargs -) -> torch._dynamo.NNOptimizedModule -``` - -- **mode** specifies what the compiler should be optimizing while compiling. - - - The default mode is a preset that tries to compile efficiently without taking too long to compile or using extra memory. - - Other modes such as `reduce-overhead` reduce the framework overhead by a lot more, but cost a small amount of extra memory. `max-autotune` compiles for a long time, trying to give you the fastest code it can generate. - -- **dynamic** specifies whether to enable the code path for Dynamic Shapes. Certain compiler optimizations cannot be applied to dynamic shaped programs. Making it explicit whether you want a compiled program with dynamic shapes or with static shapes will help the compiler give you better optimized code. -- **fullgraph** is similar to Numba’s `nopython`. It compiles the entire program into a single graph or gives an error explaining why it could not do so. Most users don’t need to use this mode. If you are very performance conscious, then you try to use it. -- **backend** specifies which compiler backend to use. By default, TorchInductor is used, but there are a few others available. - -
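A small sketch of how these knobs combine on a toy module (illustrative only; the accepted values may evolve across nightlies):

```python
import torch

model = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.ReLU())

# compile the whole program as one graph, erroring out instead of
# silently falling back if a graph break would be required
strict_model = torch.compile(model, fullgraph=True)

# pick a non-default preset and name the backend explicitly
tuned_model = torch.compile(model, mode="max-autotune", backend="inductor")

y = tuned_model(torch.randn(8, 64))
```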
- -The compile experience intends to deliver most benefits and the most flexibility in the default mode. Here is a mental model of what you get in each mode. - -Now, let us look at a full example of compiling a real model and running it (with random data) - -```python -import torch -import torchvision.models as models - -model = models.resnet18().cuda() -optimizer = torch.optim.SGD(model.parameters(), lr=0.01) -compiled_model = torch.compile(model) - -x = torch.randn(16, 3, 224, 224).cuda() -optimizer.zero_grad() -out = compiled_model(x) -out.sum().backward() -optimizer.step() -``` - -The first time you run the `compiled_model(x)`, it compiles the model. Hence, it takes longer to run. Subsequent runs are fast. - -### Modes - -The compiler has a few presets that tune the compiled model in different ways. -You might be running a small model that is slow because of framework overhead. Or, you might be running a large model that barely fits into memory. Depending on your need, you might want to use a different mode. - -```python -# API NOT FINAL -# default: optimizes for large models, low compile-time -# and no extra memory usage -torch.compile(model) - -# reduce-overhead: optimizes to reduce the framework overhead -# and uses some extra memory. Helps speed up small models -torch.compile(model, mode="reduce-overhead") - -# max-autotune: optimizes to produce the fastest model, -# but takes a very long time to compile -torch.compile(model, mode="max-autotune") - -``` - -### Reading and updating Attributes - -Accessing model attributes work as they would in eager mode. -You can access or modify attributes of your model (such as `model.conv1.weight`) as you generally would. This is completely safe and sound in terms of code correction. TorchDynamo inserts guards into the code to check if its assumptions hold true. If attributes change in certain ways, then TorchDynamo knows to recompile automatically as needed. - -```python -# optimized_model works similar to model, feel free to access its attributes and modify them -optimized_model.conv1.weight.fill_(0.01) - -# this change is reflected in model -``` - -### Hooks - -Module and Tensor [hooks](https://pytorch.org/docs/stable/notes/modules.html#module-hooks) don’t fully work at the moment, but they will eventually work as we finish development. - -### Serialization - -You can serialize the state-dict of the `optimized_model` OR the `model`. They point to the same parameters and state and hence are equivalent. - -```python -torch.save(optimized_model.state_dict(), "foo.pt") -# both these lines of code do the same thing -torch.save(model.state_dict(), "foo.pt") -``` - -You cannot serialize `optimized_model` currently. If you wish to save the object directly, save `model` instead. - -```python -torch.save(optimized_model, "foo.pt") # Error -torch.save(model, "foo.pt") # Works -``` - -### Inference and Export - -For model inference, after generating a compiled model using torch.compile, run some warm-up steps before actual model serving. This helps mitigate latency spikes during initial serving. - -In addition, we will be introducing a mode called `torch.export` that carefully exports the entire model and the guard infrastructure for environments that need guaranteed and predictable latency. `torch.export` would need changes to your program, especially if you have data dependent control-flow. 
- -```python -# API Not Final -exported_model = torch._dynamo.export(model, input) -torch.save(exported_model, "foo.pt") -``` - -This is in early stages of development. Catch the talk on Export Path at the PyTorch Conference for more details. You can also engage on this topic at our “Ask the Engineers: 2.0 Live Q&A Series” starting this month (more details at the end of this post). - -### Debugging Issues - -A compiled mode is opaque and hard to debug. You will have questions such as: - -- Why is my program crashing in compiled mode? -- Is compiled mode as accurate as eager mode? -- Why am I not seeing speedups? - -If compiled mode produces an error or a crash or diverging results from eager mode (beyond machine precision limits), it is very unlikely that it is your code’s fault. However, understanding what piece of code is the reason for the bug is useful. - -To aid in debugging and reproducibility, we have created several tools and logging capabilities out of which one stands out: **The Minifier.** - -The minifier automatically reduces the issue you are seeing to a small snippet of code. This small snippet of code reproduces the original issue and you can file a github issue with the minified code. This will help the PyTorch team fix the issue easily and quickly. - -If you are not seeing the speedups that you expect, then we have the **torch.\_dynamo.explain** tool that explains which parts of your code induced what we call “graph breaks”. Graph breaks generally hinder the compiler from speeding up the code, and reducing the number of graph breaks likely will speed up your code (up to some limit of diminishing returns). - -You can read about these and more in our [troubleshooting guide](https://pytorch.org/docs/stable/torch.compiler_troubleshooting.html). - -### Dynamic Shapes - -When looking at what was necessary to support the generality of PyTorch code, one key requirement was supporting dynamic shapes, and allowing models to take in tensors of different sizes without inducing recompilation every time the shape changes. - -As of today, support for Dynamic Shapes is limited and a rapid work in progress. It will be fully featured by stable release. It is gated behind a `dynamic=True` argument, and we have more progress on a feature branch (symbolic-shapes), on which we have successfully run BERT_pytorch in training with full symbolic shapes with TorchInductor. For inference with dynamic shapes, we have more coverage. For example, let’s look at a common setting where dynamic shapes are helpful - text generation with language models. - -We can see that even when the shape changes dynamically from 4 all the way to 256, Compiled mode is able to consistently outperform eager by up to 40%. Without support for dynamic shapes, a common workaround is to pad to the nearest power of two. However, as we can see from the charts below, it incurs a significant amount of performance overhead, and also results in significantly longer compilation time. Moreover, padding is sometimes non-trivial to do correctly. - -By supporting dynamic shapes in PyTorch 2.0’s Compiled mode, we can get the best of performance _and_ ease of use. - -
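A minimal sketch of opting into this path via the `dynamic=True` argument mentioned above (assuming a nightly build where the flag is available):

```python
import torch

model = torch.nn.Sequential(torch.nn.Linear(512, 512), torch.nn.ReLU()).cuda()
compiled = torch.compile(model, dynamic=True)

# Varying the leading dimension should not force a fresh compilation
# for every new shape.
for batch in (4, 16, 64, 256):
    x = torch.randn(batch, 512, device="cuda")
    y = compiled(x)
```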
- -The current work is evolving very rapidly and we may temporarily let some models regress as we land fundamental improvements to infrastructure. The latest updates for our progress on dynamic shapes can be found [here](https://dev-discuss.pytorch.org/t/state-of-symbolic-shapes-branch/777/19). - -## Distributed - -In summary, torch.distributed’s two main distributed wrappers work well in compiled mode. - -Both `DistributedDataParallel` (DDP) and `FullyShardedDataParallel` (FSDP) work in compiled mode and provide improved performance and memory utilization relative to eager mode, with some caveats and limitations. - -
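A minimal sketch of combining the two wrappers with `torch.compile` (not from the post; it assumes the default process group is already initialized, e.g. via `torchrun`, and uses toy placeholder modules; note that the FSDP flag is spelled `use_orig_params` in the constructor):

```python
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

# assumes torch.distributed.init_process_group(...) has already run
ddp_model = torch.compile(DDP(torch.nn.Linear(1024, 1024).cuda()))

fsdp_model = torch.compile(FSDP(torch.nn.Linear(1024, 1024).cuda(),
                                use_orig_params=True))
```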
_Figure: Speedups in AMP Precision. Left: speedups for FSDP in Compiled mode over eager mode (AMP precision). Right: FSDP in Compiled mode takes substantially less memory than in eager mode._
- -### DistributedDataParallel (DDP) - -DDP relies on overlapping AllReduce communications with backwards computation, and grouping smaller per-layer AllReduce operations into ‘buckets’ for greater efficiency. AOTAutograd functions compiled by TorchDynamo prevent communication overlap, when combined naively with DDP, but performance is recovered by compiling separate subgraphs for each ‘bucket’ and allowing communication ops to happen outside and in-between the subgraphs. DDP support in compiled mode also currently requires `static_graph=False`. See [this post](https://dev-discuss.pytorch.org/t/torchdynamo-update-9-making-ddp-work-with-torchdynamo/860) for more details on the approach and results for DDP + TorchDynamo. - -### FullyShardedDataParallel (FSDP) - -FSDP itself is a “beta” PyTorch feature and has a higher level of system complexity than DDP due to the ability to tune which submodules are wrapped and because there are generally more configuration options. FSDP works with TorchDynamo and TorchInductor for a variety of popular models, if configured with the `use_original_params=True` flag. Some compatibility issues with particular models or configurations are expected at this time, but will be actively improved, and particular models can be prioritized if github issues are filed. - -Users specify an `auto_wrap_policy` argument to indicate which submodules of their model to wrap together in an FSDP instance used for state sharding, or manually wrap submodules in FSDP instances. For example, many transformer models work well when each ‘transformer block’ is wrapped in a separate FSDP instance and thus only the full state of one transformer block needs to be materialized at one time. Dynamo will insert graph breaks at the boundary of each FSDP instance, to allow communication ops in forward (and backward) to happen outside the graphs and in parallel to computation. - -If FSDP is used without wrapping submodules in separate instances, it falls back to operating similarly to DDP, but without bucketing. Hence all gradients are reduced in one operation, and there can be no compute/communication overlap even in Eager. This configuration has only been tested with TorchDynamo for functionality but not for performance. - -## Developer/Vendor Experience - -With PyTorch 2.0, we want to simplify the backend (compiler) integration experience. To do this, we have focused on **reducing the number of operators** and **simplifying the semantics** of the operator set necessary to bring up a PyTorch backend. - -In graphical form, the PT2 stack looks like: - -
_Figure: the PT2 stack_
- -Starting in the middle of the diagram, AOTAutograd dynamically captures autograd logic in an ahead-of-time fashion, producing a graph of forward and backwards operators in FX graph format. - -We provide a set of hardened decompositions (i.e. operator implementations written in terms of other operators) that can be leveraged to **reduce** the number of operators a backend is required to implement. We also **simplify** the semantics of PyTorch operators by selectively rewriting complicated PyTorch logic including mutations and views via a process called _functionalization_, as well as guaranteeing operator metadata information such as shape propagation formulas. This work is actively in progress; our goal is to provide a _primitive_ and _stable_ set of ~250 operators with simplified semantics, called _PrimTorch,_ that vendors can leverage (i.e. opt-in to) in order to simplify their integrations. -After reducing and simplifying the operator set, backends may choose to integrate at the Dynamo (i.e. the middle layer, immediately after AOTAutograd) or Inductor (the lower layer). We describe some considerations in making this choice below, as well as future work around mixtures of backends. - -**Dynamo Backend** - -Vendors with existing compiler stacks may find it easiest to integrate as a TorchDynamo backend, receiving an FX Graph in terms of ATen/Prims IR. Note that for both training and inference, the integration point would be immediately after AOTAutograd, since we currently apply decompositions as part of AOTAutograd, and merely skip the backward-specific steps if targeting inference. - -**Inductor backend** - -Vendors can also integrate their backend directly into Inductor. Inductor takes in a graph produced by AOTAutograd that consists of ATen/Prim operations, and further lowers them down to a loop level IR. Today, Inductor provides lowerings to its loop-level IR for pointwise, reduction, scatter/gather and window operations. In addition, Inductor creates fusion groups, does indexing simplification, dimension collapsing, and tunes loop iteration order in order to support efficient code generation. Vendors can then integrate by providing the mapping from the loop level IR to hardware-specific code. Currently, Inductor has two backends: (1) C++ that generates multithreaded CPU code, (2) Triton that generates performant GPU code. These Inductor backends can be used as an inspiration for the alternate backends. - -**Mixture of Backends Interface (coming soon)** - -We have built utilities for partitioning an FX graph into subgraphs that contain operators supported by a backend and executing the remainder eagerly. These utilities can be extended to support a “mixture of backends,” configuring which portions of the graphs to run for which backend. However, there is not yet a stable interface or contract for backends to expose their operator support, preferences for patterns of operators, etc. This remains as ongoing work, and we welcome feedback from early adopters. - -## Final Thoughts - -We are super excited about the direction that we’ve taken for PyTorch 2.0 and beyond. The road to the final 2.0 release is going to be rough, but come join us on this journey early-on. If you are interested in deep-diving further or contributing to the compiler, please continue reading below which includes more information on how to get started (e.g., tutorials, benchmarks, models, FAQs) and **Ask the Engineers: 2.0 Live Q&A Series** starting this month. 
Additional resources include:

- [Getting Started](https://pytorch.org/docs/stable/torch.compiler_get_started.html)
- [Tutorials](https://pytorch.org/tutorials/)
- [Documentation](https://pytorch.org/docs/stable)
- [Developer Discussions](https://dev-discuss.pytorch.org)

## Accelerating Hugging Face and TIMM models with PyTorch 2.0

Author: Mark Saroufim

`torch.compile()` makes it easy to experiment with different compiler backends to make PyTorch code faster with a single line of code. It works directly over an `nn.Module` as a drop-in replacement for `torch.jit.script()`, but without requiring you to make any source code changes. We expect this one-line code change to provide you with between 30% and 2x training time speedups on the vast majority of models that you’re already running.

```python
opt_module = torch.compile(module)
```

torch.compile supports arbitrary PyTorch code, control flow, and mutation, and comes with experimental support for dynamic shapes. We’re so excited about this development that we call it PyTorch 2.0.

What makes this announcement different for us is that we’ve already benchmarked some of the most popular open source PyTorch models and gotten substantial speedups ranging from 30% to 2x: [https://github.com/pytorch/torchdynamo/issues/681](https://github.com/pytorch/torchdynamo/issues/681).

There are no tricks here: we pip installed popular libraries like [https://github.com/huggingface/transformers](https://github.com/huggingface/transformers), [https://github.com/huggingface/accelerate](https://github.com/huggingface/accelerate) and [https://github.com/rwightman/pytorch-image-models](https://github.com/rwightman/pytorch-image-models), ran `torch.compile()` on them, and that’s it.

It’s rare to get both performance and convenience, but this is why the core team finds PyTorch 2.0 so exciting.

## Requirements

For GPU (newer generation GPUs will see drastically better performance):

```
pip3 install numpy --pre torch --force-reinstall --index-url https://download.pytorch.org/whl/nightly/cu117
```

For CPU:

```
pip3 install --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu
```

Optional: Verify Installation

```
git clone https://github.com/pytorch/pytorch
cd pytorch/tools/dynamo
python verify_dynamo.py
```

Optional: Docker installation

We also provide all the required dependencies in the PyTorch nightly binaries, which you can download with:

```
docker pull ghcr.io/pytorch/pytorch-nightly
```

And for ad hoc experiments just make sure that your container has access to all your GPUs:

```
docker run --gpus all -it ghcr.io/pytorch/pytorch-nightly:latest /bin/bash
```

## Getting Started

Please read Mark Saroufim’s [full blog post](/blog/Accelerating-Hugging-Face-and-TIMM-models/) where he walks you through a tutorial and real models for you to try PyTorch 2.0 today.

Our goal with PyTorch 2.0 was to build a breadth-first compiler that would speed up the vast majority of actual models people run in open source. The Hugging Face Hub ended up being an extremely valuable benchmarking tool for us, ensuring that any optimization we work on actually helps accelerate models people want to run.

The blog tutorial will show you exactly how to replicate those speedups so you can be as excited about PyTorch 2.0 as we are.
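To get a feel for the measurement the blog post describes, here is a minimal benchmarking sketch (ours, not from the post) comparing eager and compiled inference on a TIMM model. It assumes `timm` is installed and a CUDA GPU is available; real speedups depend heavily on the model, batch size, precision and GPU generation.

```python
import time
import torch
import timm  # assumes `pip install timm`; Hugging Face models work the same way

def bench(fn, x, iters=20):
    # Warm up first: the initial compiled call also pays the one-time compilation cost.
    for _ in range(3):
        fn(x)
    torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(iters):
        fn(x)
    torch.cuda.synchronize()
    return (time.perf_counter() - start) / iters

model = timm.create_model("resnet50", pretrained=False).cuda().eval()
x = torch.randn(16, 3, 224, 224, device="cuda")

with torch.no_grad():
    eager_ms = bench(model, x) * 1e3
    compiled_ms = bench(torch.compile(model), x) * 1e3

print(f"eager: {eager_ms:.1f} ms/iter  compiled: {compiled_ms:.1f} ms/iter")
```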
So please try out PyTorch 2.0, enjoy the free perf, and if you’re not seeing it then please open an issue and we will make sure your model is supported: [https://github.com/pytorch/torchdynamo/issues](https://github.com/pytorch/torchdynamo/issues)

After all, we can’t claim we’ve created a breadth-first compiler unless **YOUR** models actually run faster.

## FAQs

1. **What is PT 2.0?**
2.0 is the latest PyTorch version. PyTorch 2.0 offers the same eager-mode development experience, while adding a compiled mode via torch.compile. This compiled mode has the potential to speed up your models during training and inference.

2. **Why 2.0 instead of 1.14?**
PyTorch 2.0 is what 1.14 would have been. We were releasing substantial new features that we believe change how you meaningfully use PyTorch, so we are calling it 2.0 instead.

3. **How do I install 2.0? Any additional requirements?**

    Install the latest nightlies:

    CUDA 11.8
    ```
    pip3 install numpy --pre torch torchvision torchaudio --force-reinstall --index-url https://download.pytorch.org/whl/nightly/cu118
    ```

    CUDA 11.7

    ```
    pip3 install numpy --pre torch torchvision torchaudio --force-reinstall --index-url https://download.pytorch.org/whl/nightly/cu117
    ```

    CPU

    ```
    pip3 install numpy --pre torch torchvision torchaudio --force-reinstall --index-url https://download.pytorch.org/whl/nightly/cpu
    ```

4. **Is 2.0 code backwards-compatible with 1.X?**
Yes, using 2.0 will not require you to modify your PyTorch workflows. A single line of code, `model = torch.compile(model)`, can optimize your model to use the 2.0 stack and run smoothly with the rest of your PyTorch code. This is completely opt-in, and you are not required to use the new compiler.

5. **Is 2.0 enabled by default?**
2.0 is the name of the release. torch.compile is the feature released in 2.0, and you need to explicitly use torch.compile.

6. **How do I migrate my PT 1.X code to PT 2.0?**
Your code should work as-is without the need for any migrations. If you want to use the new compiled mode feature introduced in 2.0, then you can start by optimizing your model with one line: `model = torch.compile(model)`.
While the speedups are primarily observed during training, you can also use it for inference if your model runs faster than eager mode.

    ```python
    import torch

    def train(model, dataloader):
        model = torch.compile(model)
        for batch in dataloader:
            run_epoch(model, batch)

    def infer(model, input):
        model = torch.compile(model)
        return model(**input)
    ```

7. **Why should I use PT 2.0 instead of PT 1.X?**
See the answer to Question (2).

8. **What is my code doing differently when running PyTorch 2.0?**
Out of the box, PyTorch 2.0 is the same as PyTorch 1.x: your models run in eager mode, i.e. every line of Python is executed one after the other.
In 2.0, if you wrap your model in `model = torch.compile(model)`, your model goes through 3 steps before execution:
    1. Graph acquisition: first the model is rewritten as blocks of subgraphs. Subgraphs which can be compiled by TorchDynamo are “flattened”, and the other subgraphs (which might contain control-flow code or other unsupported Python constructs) fall back to eager mode.
    2. Graph lowering: all the PyTorch operations are decomposed into their constituent kernels specific to the chosen backend.
    3. Graph compilation, where the kernels call their corresponding low-level device-specific operations.

9. **What new components does PT 2.0 add to PT?**
    - **TorchDynamo** generates FX Graphs from Python bytecode. It maintains the eager-mode capabilities using [guards](https://pytorch.org/docs/stable/torch.compiler_guards_overview.html#caching-and-guards-overview) to ensure the generated graphs are valid ([read more](https://dev-discuss.pytorch.org/t/torchdynamo-an-experiment-in-dynamic-python-bytecode-transformation/361)).
    - **AOTAutograd** generates the backward graph corresponding to the forward graph captured by TorchDynamo ([read more](https://dev-discuss.pytorch.org/t/torchdynamo-update-6-training-support-with-aotautograd/570)).
    - **PrimTorch** decomposes complicated PyTorch operations into simpler and more elementary ops ([read more](https://dev-discuss.pytorch.org/t/tracing-with-primitives-update-2/645)).
    - **\[Backend]** Backends integrate with TorchDynamo to compile the graph into IR that can run on accelerators.
    For example, **TorchInductor** compiles the graph to either **Triton** for GPU execution or **OpenMP** for CPU execution ([read more](https://dev-discuss.pytorch.org/t/torchinductor-a-pytorch-native-compiler-with-define-by-run-ir-and-symbolic-shapes/747)).

10. **What compiler backends does 2.0 currently support?**
The default and most complete backend is [TorchInductor](https://github.com/pytorch/pytorch/tree/master/torch/_inductor), but TorchDynamo has a growing list of backends that can be found by calling `torch._dynamo.list_backends()`.

11. **How does distributed training work with 2.0?**
DDP and FSDP in compiled mode can run up to 15% faster than eager mode in FP32 and up to 80% faster in AMP precision. PT 2.0 does some extra optimization to ensure DDP’s communication-computation overlap works well with Dynamo’s partial graph creation. Ensure you run DDP with `static_graph=False`. More details [here](https://dev-discuss.pytorch.org/t/torchdynamo-update-9-making-ddp-work-with-torchdynamo/860).

12. **How can I learn more about PT 2.0 developments?**
The [PyTorch Developers forum](http://dev-discuss.pytorch.org/) is the best place to learn about 2.0 components directly from the developers who build them.

13. **Help, my code is running slower with 2.0’s Compiled Mode!**
The most likely reason for performance hits is too many graph breaks. For instance, something as innocuous as a print statement in your model’s forward triggers a graph break. We have ways to diagnose these; read more [here](https://pytorch.org/docs/stable/torch.compiler_faq.html#why-am-i-not-seeing-speedups).

14. **My previously-running code is crashing with 2.0’s Compiled Mode! How do I debug it?**
Here are some techniques to triage where your code might be failing, and to print helpful logs: [https://pytorch.org/docs/stable/torch.compiler_faq.html#why-is-my-code-crashing](https://pytorch.org/docs/stable/torch.compiler_faq.html#why-is-my-code-crashing).

## Ask the Engineers: 2.0 Live Q&A Series

We will be hosting a series of live Q&A sessions for the community to ask deeper questions and have a dialogue with the experts. Please check back to see the full calendar of topics throughout the year. If you are unable to attend: 1) the sessions will be recorded for future viewing and 2) you can attend our Dev Infra Office Hours every Friday at 10 AM PST at [https://github.com/pytorch/pytorch/wiki/Dev-Infra-Office-Hours](https://github.com/pytorch/pytorch/wiki/Dev-Infra-Office-Hours).

Please click [here](https://pytorchconference22.splashthat.com/) to see dates, times, descriptions and links.

Disclaimer: Please do not share your personal information, last name, or company when joining the live sessions and submitting questions.
| TOPIC | HOST |
| ----- | ---- |
| The new developer experience of using 2.0 (install, setup, clone an example, run with 2.0) | Suraj Subramanian |
| PT2 Profiling and Debugging | Bert Maher |
| A deep dive on TorchInductor and PT 2.0 Backend Integration | Natalia Gimelshein, Bin Bao and Sherlock Huang |
| Extend PyTorch without C++ and functorch: JAX-like composable function transforms for PyTorch | Anjali Chourdia and Samantha Andow |
| A deep dive on TorchDynamo | Michael Voznesensky |
| Rethinking data loading with TorchData: DataPipes and DataLoader2 | Kevin Tse |
| Composable training (+ torcheval, torchsnapshot) | Ananth Subramaniam |
| How and why contribute code and tutorials to PyTorch | Zain Rizvi, Svetlana Karslioglu and Carl Parker |
| Dynamic Shapes and Calculating Maximum Batch Size | Edward Yang and Elias Ellison |
| PyTorch 2.0 Export: Sound Whole Graph Capture for PyTorch | Michael Suo and Yanan Cao |
| 2-D Parallelism using DistributedTensor and PyTorch DistributedTensor | Wanchao Liang and Alisson Gusatti Azzolini |
| TorchRec and FSDP in Production | Dennis van der Staay, Andrew Gu and Rohan Varma |
| The Future of PyTorch On-Device | Raziel Alvarez Guevara |
| TorchMultiModal | Kartikay Khandelwal |
| BetterTransformers (+ integration with Hugging Face), Model Serving and Optimizations | Hamid Shojanazeri and Mark Saroufim |
| PT2 and Distributed | Will Constable |
## Watch the Talks from PyTorch Conference

- [TorchDynamo](https://www.youtube.com/watch?v=vbtGZL7IrAw)
- [TorchInductor](https://www.youtube.com/watch?v=vbtGZL7IrAw)
- [Dynamic Shapes](https://www.youtube.com/watch?v=vbtGZL7IrAw)
- [Export Path](https://www.youtube.com/watch?v=vbtGZL7IrAw)

diff --git a/_hub b/_hub deleted file mode 160000 index 3fae0ff2f1d6..000000000000 --- a/_hub +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 3fae0ff2f1d6e4cfc4004a213c9499fa7d996f16 diff --git a/_includes/analytics.html b/_includes/analytics.html deleted file mode 100644 index fb09ac21df90..000000000000 --- a/_includes/analytics.html +++ /dev/null @@ -1,13 +0,0 @@ - - - diff --git a/_includes/blog_modal.html b/_includes/blog_modal.html deleted file mode 100644 index eea315a4b91e..000000000000 --- a/_includes/blog_modal.html +++ /dev/null @@ -1,18 +0,0 @@ - - -
- -
- - diff --git a/_includes/community_events.html b/_includes/community_events.html deleted file mode 100644 index c9d97e6c0d09..000000000000 --- a/_includes/community_events.html +++ /dev/null @@ -1,10 +0,0 @@ - diff --git a/_includes/compact_hub_cards.html b/_includes/compact_hub_cards.html deleted file mode 100644 index 6abcd928d420..000000000000 --- a/_includes/compact_hub_cards.html +++ /dev/null @@ -1,29 +0,0 @@ -
-
- {% assign hub = site.hub | where: "category", include.category | sort: "order" %} - - {% for item in hub %} - - {% endfor %} -
-
diff --git a/_includes/contributor_side_nav.html b/_includes/contributor_side_nav.html deleted file mode 100644 index 39a8d3b13f00..000000000000 --- a/_includes/contributor_side_nav.html +++ /dev/null @@ -1,55 +0,0 @@ -
-
-
    - {% assign past_issues = site.past_issues | sort_natural: "date" | reverse %} - - {% for item in past_issues %} - {% assign currentdate = item.date | date: "%B %Y" %} - {% if currentdate != date %} - {% assign date = currentdate %} - {% endif %} - - - {% endfor %} -
-
-
- - - diff --git a/_includes/cookie_banner.html b/_includes/cookie_banner.html deleted file mode 100644 index 27e1820f4765..000000000000 --- a/_includes/cookie_banner.html +++ /dev/null @@ -1,6 +0,0 @@ - diff --git a/_includes/deep_learning_event_tracking.html b/_includes/deep_learning_event_tracking.html deleted file mode 100644 index 65f8f1ab4a0a..000000000000 --- a/_includes/deep_learning_event_tracking.html +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/_includes/deep_learning_form.html b/_includes/deep_learning_form.html deleted file mode 100644 index d5b5baa76030..000000000000 --- a/_includes/deep_learning_form.html +++ /dev/null @@ -1,22 +0,0 @@ -
- -
- - diff --git a/_includes/ecosystem_form.html b/_includes/ecosystem_form.html deleted file mode 100644 index a816be567fc5..000000000000 --- a/_includes/ecosystem_form.html +++ /dev/null @@ -1,17 +0,0 @@ -
- - - -
\ No newline at end of file diff --git a/_includes/ecosystem_sort.html b/_includes/ecosystem_sort.html deleted file mode 100644 index 3791244bae47..000000000000 --- a/_includes/ecosystem_sort.html +++ /dev/null @@ -1,41 +0,0 @@ - - Sort - - - diff --git a/_includes/educational_courses_module.html b/_includes/educational_courses_module.html deleted file mode 100644 index 23c0bfeb9493..000000000000 --- a/_includes/educational_courses_module.html +++ /dev/null @@ -1,26 +0,0 @@ -
-
-
-
-

Educational Courses

- - See all Courses - -
-
- -
- {% assign courses = site.courses | sort: 'order' %} - - {% for course in courses limit: 4 %} - - {% endfor %} -
-
-
diff --git a/_includes/events_side_nav.html b/_includes/events_side_nav.html deleted file mode 100644 index a616af89c66d..000000000000 --- a/_includes/events_side_nav.html +++ /dev/null @@ -1,48 +0,0 @@ -
-
    - {% assign events = site.events | where: "category", "event" | sort_natural: "date" | reverse %} - - {% for item in events %} - - {% endfor %} -
-
- - - diff --git a/_includes/footer.html b/_includes/footer.html deleted file mode 100644 index a74402d61751..000000000000 --- a/_includes/footer.html +++ /dev/null @@ -1,96 +0,0 @@ -
-
-
-
-

Docs

-

Access comprehensive developer documentation for PyTorch

- View Docs -
- -
-

Tutorials

-

Get in-depth tutorials for beginners and advanced developers

- View Tutorials -
- -
-

Resources

-

Find development resources and get your questions answered

- View Resources -
-
-
-
- -
- -
- -{% include mobile_menu.html %} - -{% include footer_scripts.html %} - -{% include cookie_banner.html %} diff --git a/_includes/footer_scripts.html b/_includes/footer_scripts.html deleted file mode 100644 index 97c5ed3909f0..000000000000 --- a/_includes/footer_scripts.html +++ /dev/null @@ -1,42 +0,0 @@ - - - -{% if page.layout != "deep_learning" %} - -{% endif %} - - - - -{% if jekyll.environment == 'production' %} - - -{% endif %} diff --git a/_includes/get_started_locally.html b/_includes/get_started_locally.html deleted file mode 100644 index d68edadeb9ad..000000000000 --- a/_includes/get_started_locally.html +++ /dev/null @@ -1,12 +0,0 @@ -
-
- {% include get_started_locally_side_nav.html %} -
-
-
-
- {{ content }} -
-
-
-
diff --git a/_includes/get_started_locally_side_nav.html b/_includes/get_started_locally_side_nav.html deleted file mode 100644 index c90b6a302a6f..000000000000 --- a/_includes/get_started_locally_side_nav.html +++ /dev/null @@ -1,5 +0,0 @@ -
-

Shortcuts

-
    -
    - diff --git a/_includes/get_started_via_cloud.html b/_includes/get_started_via_cloud.html deleted file mode 100644 index ba08423cafee..000000000000 --- a/_includes/get_started_via_cloud.html +++ /dev/null @@ -1,12 +0,0 @@ -
    -
    - {% include get_started_via_cloud_side_nav.html %} -
    -
    -
    -
    - {{ content }} -
    -
    -
    -
    diff --git a/_includes/get_started_via_cloud_side_nav.html b/_includes/get_started_via_cloud_side_nav.html deleted file mode 100644 index cb088dd3097c..000000000000 --- a/_includes/get_started_via_cloud_side_nav.html +++ /dev/null @@ -1,3 +0,0 @@ -
    -
      -
      diff --git a/_includes/google_pixel.html b/_includes/google_pixel.html deleted file mode 100644 index db996dd5dc69..000000000000 --- a/_includes/google_pixel.html +++ /dev/null @@ -1 +0,0 @@ - diff --git a/_includes/head.html b/_includes/head.html deleted file mode 100644 index b86b1e202467..000000000000 --- a/_includes/head.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - - - - {% if page.title %} - {{ page.title }} | PyTorch - {% else %} - PyTorch - {% endif %} - - {% include open_graph_and_meta.html %} - - - - - - - - - {% if jekyll.environment == 'production' %} - {% include pixel.html %} - {% include twitter_pixel.html %} - {% endif %} - - - diff --git a/_includes/header.html b/_includes/header.html deleted file mode 100644 index 45c484e1845e..000000000000 --- a/_includes/header.html +++ /dev/null @@ -1,10 +0,0 @@ -
      -
      - Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. -
      -
      -
      -
      - {% include nav.html %} -
      -
      diff --git a/_includes/hub_cards.html b/_includes/hub_cards.html deleted file mode 100644 index 38d367aa52b9..000000000000 --- a/_includes/hub_cards.html +++ /dev/null @@ -1,34 +0,0 @@ -
      -
      - {% assign hub = site.hub | where: "category", include.category | sort: "order" %} - - {% for item in hub %} - - {% endfor %} -
      - - -
      diff --git a/_includes/hub_developer_tags_and_cards.html b/_includes/hub_developer_tags_and_cards.html deleted file mode 100644 index 695757dfcf47..000000000000 --- a/_includes/hub_developer_tags_and_cards.html +++ /dev/null @@ -1,33 +0,0 @@ - - -
      - -{% if page.compact == true %} - - {% include compact_hub_cards.html category="developers" %} - -{% else %} - - {% include hub_cards.html category="developers" %} - -{% endif %} diff --git a/_includes/hub_icons.html b/_includes/hub_icons.html deleted file mode 100644 index 46ec4cd0c78c..000000000000 --- a/_includes/hub_icons.html +++ /dev/null @@ -1,2 +0,0 @@ - - diff --git a/_includes/hub_model_tags.html b/_includes/hub_model_tags.html deleted file mode 100644 index cadf01c94687..000000000000 --- a/_includes/hub_model_tags.html +++ /dev/null @@ -1,12 +0,0 @@ -
      - -
      diff --git a/_includes/hub_researcher_tags_and_cards.html b/_includes/hub_researcher_tags_and_cards.html deleted file mode 100644 index 375e65a8bdde..000000000000 --- a/_includes/hub_researcher_tags_and_cards.html +++ /dev/null @@ -1,33 +0,0 @@ - - -
      - -{% if page.compact == true %} - - {% include compact_hub_cards.html category="researchers" %} - -{% else %} - - {% include hub_cards.html category="researchers" %} - -{% endif %} diff --git a/_includes/hub_search.html b/_includes/hub_search.html deleted file mode 100644 index 43e227f66437..000000000000 --- a/_includes/hub_search.html +++ /dev/null @@ -1,11 +0,0 @@ -
      -
      -
      - - -
      -
      diff --git a/_includes/hub_sort.html b/_includes/hub_sort.html deleted file mode 100644 index 50a455586991..000000000000 --- a/_includes/hub_sort.html +++ /dev/null @@ -1,17 +0,0 @@ - - Sort - - - diff --git a/_includes/latest_episodes.html b/_includes/latest_episodes.html deleted file mode 100644 index ae0f982721a8..000000000000 --- a/_includes/latest_episodes.html +++ /dev/null @@ -1,36 +0,0 @@ -

      Latest Episodes

      -
      - {% assign events = site.events | where: "category", "live-stream" | sort_natural: "date" | reverse %} - {% capture now %}{{'now' | date: '%s' | plus: 0 %}}{% endcapture %} - {% for item in events | where: "type" == "live-stream" | sort: "date" %} - {% capture date %}{{item.date | date: '%s' | plus: 0 %}}{% endcapture %} - {% if date <= now %} -
      -
      - {{ item.title }} -
        -
      • {{ item.guest }}
      • -
      • {{ item.company }}
      • -
      • -
          -
        • {{ item.date | date: "%m/%d/%Y" }}
        • - | -
        • {{ item.time }}
        • -
        -
      • - -
      -
        - {% if item.poster %} -
      • Poster
      • - {% endif %} - {% if item.video %} -
      • Watch
      • - {% endif %} -
      -
      -
      - {% endif %} - {% endfor %} -
      - \ No newline at end of file diff --git a/_includes/live_event_video.html b/_includes/live_event_video.html deleted file mode 100644 index 23a40b1ee006..000000000000 --- a/_includes/live_event_video.html +++ /dev/null @@ -1,12 +0,0 @@ -
      -
      -
      - {{item.video}} -
      -
      -

      {{ item.title }}

      -

      {{ item.summary }}

      - CTA to Video -
      -
      -
      diff --git a/_includes/live_events.html b/_includes/live_events.html deleted file mode 100644 index e506c78bf17c..000000000000 --- a/_includes/live_events.html +++ /dev/null @@ -1,16 +0,0 @@ -
      -
      - {% include events_side_nav.html %} -
      -
      -
      -
      - {% assign events = site.events | where: "category", "event" | sort_natural: "date" | reverse %} - {% capture now %}{{'now' | date: '%s' | plus: 0 %}}{% endcapture %} - {% capture date %}{{item.date | date: '%s' | plus: 0 %}}{% endcapture %} - {% include upcoming-live-events.html %} - {% include past-live-events.html %} -
      -
      -
      -
      diff --git a/_includes/live_stream.html b/_includes/live_stream.html deleted file mode 100644 index d372e63ec7d9..000000000000 --- a/_includes/live_stream.html +++ /dev/null @@ -1,13 +0,0 @@ - diff --git a/_includes/main_menu.html b/_includes/main_menu.html deleted file mode 100644 index fd14bbcf6fb2..000000000000 --- a/_includes/main_menu.html +++ /dev/null @@ -1,5 +0,0 @@ - - - diff --git a/_includes/march_2021.md b/_includes/march_2021.md deleted file mode 100644 index e33189a9f2c8..000000000000 --- a/_includes/march_2021.md +++ /dev/null @@ -1,39 +0,0 @@ - -# Issue \#1 - -Welcome to the first issue of the PyTorch Contributors newsletter! Keeping track of everything that’s happening in the PyTorch developer world is a big task; here you will find curated news including RFCs, feature roadmaps, notable PRs, editorials from developers, and more. If you have questions or suggestions for the newsletter, we'd love to [hear from you](https://forms.gle/2KApHZa3oDHuAQ288) - -## PyTorch 1.8.0 - -PyTorch 1.8 was released on March 4th with support for functional transformations using `torch.fx`, stabilized frontend APIs for scientific computing (`torch.fft`, `torch.linalg`, Autograd for complex tensors) and significant improvements to distributed training. Read the full [Release Notes](https://github.com/pytorch/pytorch/releases/tag/v1.8.0){:target="_blank"}. - -## PyTorch Ecosystem Day - -On April 21, we’re hosting a virtual event for our ecosystem and industry communities to showcase their work and discover new opportunities to collaborate. The day will be filled with discussion on new developments, trends, challenges and best practices through posters, breakout sessions and networking. - -## [The PyTorch open source process](http://blog.ezyang.com/2021/01/pytorch-open-source-process/){:target="_blank"} - -[@ezyang](https://github.com/ezyang){:target="_blank"} describes the challenges of maintaining a PyTorch-scale project, and the current open source processes (triaging and CI oncalls, RFC discussions) to help PyTorch operate effectively. - -## Developers forum - -We launched https://dev-discuss.pytorch.org/ a low-traffic high-signal forum for long-form discussions about PyTorch internals. - -## [RFC] [Dataloader v2](https://github.com/pytorch/pytorch/issues/49440) - -[@VitalyFedyunin](https://github.com/VitalyFedyunin) proposes redesigning the DataLoader to support lazy loading, sharding, pipelining data operations (including async) and shuffling & sampling in a more modular way. Join the discussion [here](https://github.com/pytorch/pytorch/issues/49440). - -## [RFC] [Improving TorchScript Usability](https://dev-discuss.pytorch.org/t/torchscript-usability/55) - -In a series of 3 blog posts ([1](https://lernapparat.de/scripttorch/), [2](https://lernapparat.de/jit-python-graphops/), [3](https://lernapparat.de/jit-fallback/)) [@t-vi](https://github.com/t-vi) explores ideas to improve the user and developer experience of TorchScript. 
- -## [RFC] [CSR and DM storage formats for sparse tensors](https://github.com/pytorch/rfcs/pull/13) - -[@pearu](https://github.com/pearu) proposes an [RFC](https://github.com/pytorch/rfcs/pull/13) to make linear algebra operations more performant by - -- implementing the CSR storage format, where a 2D array is defined by shape and 1D tensors for compressed row indices, column indices, and values (PyTorch 1D tensor) -- introducing the Dimension Mapping storage format that generalizes a 2D CSR to multidimensional arrays using a bijective mapping between the storage and wrapper elements. - -## [RFC] [Forward Mode AD](https://github.com/pytorch/rfcs/pull/11) - -[@albanD](https://github.com/albanD) proposes an [RFC](https://github.com/pytorch/rfcs/pull/11) to implement forward mode autodiff using Tensor-based [dual numbers](https://blog.demofox.org/2014/12/30/dual-numbers-automatic-differentiation/), where the real part represents the tensor and the *dual* part stores the forward gradient of the tensor. The core of the feature has landed [(PR)](https://github.com/pytorch/pytorch/pull/49734), with more formulas in WIP. Complete forward mode AD is expected to land by July 2021. diff --git a/_includes/mobile_menu.html b/_includes/mobile_menu.html deleted file mode 100644 index dd92530a4dfe..000000000000 --- a/_includes/mobile_menu.html +++ /dev/null @@ -1,18 +0,0 @@ -
      -
      -
      -
      - - -
      -
      -
      - - -
      diff --git a/_includes/mobile_page_side_nav.html b/_includes/mobile_page_side_nav.html deleted file mode 100644 index ead73160240e..000000000000 --- a/_includes/mobile_page_side_nav.html +++ /dev/null @@ -1,5 +0,0 @@ -
      -

      Shortcuts

      -
        -
        - diff --git a/_includes/nav.html b/_includes/nav.html deleted file mode 100644 index d6291beb5b90..000000000000 --- a/_includes/nav.html +++ /dev/null @@ -1,9 +0,0 @@ -{% assign current = page.url | downcase | remove: ".html" | split: '/' %} - -
        - - - {% include main_menu.html %} - - -
        diff --git a/_includes/news_banner_info.html b/_includes/news_banner_info.html deleted file mode 100644 index 13d92d18516f..000000000000 --- a/_includes/news_banner_info.html +++ /dev/null @@ -1,33 +0,0 @@ -{% assign news_collection_size = site.news.size %} - -{% if news_collection_size == 1 %} - - - -{% elsif news_item.order == news_collection_size %} - - - - - -{% elsif news_item.order > 1 %} - - - - - -{% else %} - - - - - -{% endif %} diff --git a/_includes/newsletter_subscribe_form.html b/_includes/newsletter_subscribe_form.html deleted file mode 100644 index 2555d16239f9..000000000000 --- a/_includes/newsletter_subscribe_form.html +++ /dev/null @@ -1,28 +0,0 @@ -
        - -
        diff --git a/_includes/open_graph_and_meta.html b/_includes/open_graph_and_meta.html deleted file mode 100644 index 8feeeeac8c24..000000000000 --- a/_includes/open_graph_and_meta.html +++ /dev/null @@ -1,18 +0,0 @@ - - - -{% if page.featured-img %} - - -{% else %} - - -{% endif %} - - - - - - - - diff --git a/_includes/pagination_buttons.html b/_includes/pagination_buttons.html deleted file mode 100644 index bb32f85da452..000000000000 --- a/_includes/pagination_buttons.html +++ /dev/null @@ -1,30 +0,0 @@ -
        -
          - {% if paginator.previous_page %} -
        • - {% if page.title contains 'Blog' %} - Previous - {% else %} - Previous - {% endif %} -
        • - {% else %} -
        • - Previous -
        • - {% endif %} - - {% if paginator.next_page %} - {% if page.title contains 'Blog' %} -
        • Next - {% else %} -
        • Next - {% endif %} -
        • - {% else %} -
        • - Next -
        • - {% endif %} -
        -
        diff --git a/_includes/past-live-events.html b/_includes/past-live-events.html deleted file mode 100644 index bc3a8ab03eab..000000000000 --- a/_includes/past-live-events.html +++ /dev/null @@ -1,22 +0,0 @@ -

        Past Events

        -{% assign events = site.events | where: "category", "event" | sort_natural: "date" | reverse %} -{% capture now %}{{'now' | date: '%s' | plus: 0 %}}{% endcapture %} -{% for item in events %} - {% capture date %}{{item.date | date: '%s' | plus: 0 %}}{% endcapture %} - {% if date <= now %} -
        - {% assign events = site.events %} - {% capture date %}{{item.date | date: '%s' | plus: 0 %}}{% endcapture %} -
        -

        {{ item.title }}

        - {% if item.header-image And item.header-image != "" And item.header-image != nil %} - - {% endif %} -

        {{ item.content | markdownify }}

        - {% if item.video %} - {% include live_event_video.html %} - {% endif %} -
        -
        - {% endif %} -{% endfor %} diff --git a/_includes/past_issues.html b/_includes/past_issues.html deleted file mode 100644 index e3d20d5d7795..000000000000 --- a/_includes/past_issues.html +++ /dev/null @@ -1,17 +0,0 @@ - diff --git a/_includes/pixel.html b/_includes/pixel.html deleted file mode 100644 index 258a3a209ab7..000000000000 --- a/_includes/pixel.html +++ /dev/null @@ -1,18 +0,0 @@ - - - diff --git a/_includes/podcast.html b/_includes/podcast.html deleted file mode 100644 index 20f63324749e..000000000000 --- a/_includes/podcast.html +++ /dev/null @@ -1,26 +0,0 @@ - diff --git a/_includes/production.html b/_includes/production.html deleted file mode 100644 index f38e5cd7d63f..000000000000 --- a/_includes/production.html +++ /dev/null @@ -1,505 +0,0 @@ -
        -
        -
        - {% include production_side_nav.html %} -
        -
        -
        -
        - - - - - - - - - - - - - - - - -
        -
        -
        -
        -
        diff --git a/_includes/production_side_nav.html b/_includes/production_side_nav.html deleted file mode 100644 index dd0af6c8a834..000000000000 --- a/_includes/production_side_nav.html +++ /dev/null @@ -1,61 +0,0 @@ - - - - - diff --git a/_includes/pytorch-side-nav.html b/_includes/pytorch-side-nav.html deleted file mode 100644 index 5d4cbf1c2417..000000000000 --- a/_includes/pytorch-side-nav.html +++ /dev/null @@ -1,59 +0,0 @@ - - \ No newline at end of file diff --git a/_includes/pytorch.html b/_includes/pytorch.html deleted file mode 100644 index d2b93cfe1257..000000000000 --- a/_includes/pytorch.html +++ /dev/null @@ -1,9 +0,0 @@ -
        -
        {% include pytorch-side-nav.html %}
        -
        -
        -
        {{ content }}
        -
        -
        -
        - \ No newline at end of file diff --git a/_includes/quick-start-module.js b/_includes/quick-start-module.js deleted file mode 100644 index 345c1d0434bb..000000000000 --- a/_includes/quick-start-module.js +++ /dev/null @@ -1,284 +0,0 @@ -// Keys are Substrings as diplayed by navigator.platform -var supportedOperatingSystems = new Map([ - ['linux', 'linux'], - ['mac', 'macos'], - ['win', 'windows'], -]); - -var archInfoMap = new Map([ - ['cuda', {title: "CUDA", platforms: new Set(['linux', 'windows'])}], - ['rocm', {title: "ROCm", platforms: new Set(['linux'])}], - ['accnone', {title: "CPU", platforms: new Set(['linux', 'macos', 'windows'])}] -]); - -let version_map={{ ACC ARCH MAP }} -let stable_version={{ VERSION }}; - -var default_selected_os = getAnchorSelectedOS() || getDefaultSelectedOS(); -var opts = { - cuda: getPreferredCuda(default_selected_os), - os: default_selected_os, - pm: 'pip', - language: 'python', - ptbuild: 'stable', -}; - -var supportedCloudPlatforms = [ - 'aws', - 'google-cloud', - 'microsoft-azure', - 'lightning-studios', -]; - -var os = $(".os > .option"); -var package = $(".package > .option"); -var language = $(".language > .option"); -var cuda = $(".cuda > .option"); -var ptbuild = $(".ptbuild > .option"); - -os.on("click", function() { - selectedOption(os, this, "os"); -}); -package.on("click", function() { - selectedOption(package, this, "pm"); -}); -language.on("click", function() { - selectedOption(language, this, "language"); -}); -cuda.on("click", function() { - selectedOption(cuda, this, "cuda"); -}); -ptbuild.on("click", function() { - selectedOption(ptbuild, this, "ptbuild") -}); - -// Pre-select user's operating system -$(function() { - var userOsOption = document.getElementById(opts.os); - var userCudaOption = document.getElementById(opts.cuda); - if (userOsOption) { - $(userOsOption).trigger("click"); - } - if (userCudaOption) { - $(userCudaOption).trigger("click"); - } -}); - - -// determine os (mac, linux, windows) based on user's platform -function getDefaultSelectedOS() { - var platform = navigator.platform.toLowerCase(); - for (var [navPlatformSubstring, os] of supportedOperatingSystems.entries()) { - if (platform.indexOf(navPlatformSubstring) !== -1) { - return os; - } - } - // Just return something if user platform is not in our supported map - return supportedOperatingSystems.values().next().value; -} - -// determine os based on location hash -function getAnchorSelectedOS() { - var anchor = location.hash; - var ANCHOR_REGEX = /^#[^ ]+$/; - // Look for anchor in the href - if (!ANCHOR_REGEX.test(anchor)) { - return false; - } - // Look for anchor with OS in the first portion - var testOS = anchor.slice(1).split("-")[0]; - for (var [navPlatformSubstring, os] of supportedOperatingSystems.entries()) { - if (testOS.indexOf(navPlatformSubstring) !== -1) { - return os; - } - } - return false; -} - -// determine CUDA version based on OS -function getPreferredCuda(os) { - // Only CPU builds are currently available for MacOS - if (os == 'macos') { - return 'accnone'; - } - return 'cuda.x'; -} - -// Disable compute platform not supported on OS -function disableUnsupportedPlatforms(os) { - - if(opts.ptbuild == "preview") - archMap = version_map.nightly - else - archMap = version_map.release - - for (const [arch_key, info] of archInfoMap) { - var elems = document.querySelectorAll('[id^="'+arch_key+'"]'); - if (elems == null) { - console.log("Failed to find element for architecture " + arch_key); - return; - } - for (var i=0; i < elems.length;i++) { 
- var supported = info.platforms.has(os); - elems[i].style.textDecoration = supported ? "" : "line-through"; - - // Officially supported arch but not available - if(!archMap[elems[i].id]) { - elems[i].style.textDecoration = "line-through"; - } - } - } -} - -// Change compute versions depending on build type -function changeVersion(ptbuild) { - - if(ptbuild == "preview") - archMap = version_map.nightly - else - archMap = version_map.release - - for (const [arch_key, info] of archInfoMap) { - var elems = document.querySelectorAll('[id^="'+arch_key+'"]'); - for (var i=0; i < elems.length;i++) { - if(archMap[elems[i].id]) { - elems[i].style.textDecoration = ""; - elems[i].children[0].textContent = info.title + " " + archMap[elems[i].id][1] - } else { - elems[i].style.textDecoration = "line-through"; - } - } - } - var stable_element = document.getElementById("stable"); - stable_element.children[0].textContent = stable_version; -} - - - -// Change accnone name depending on OS type -function changeAccNoneName(osname) { - var accnone_element = document.getElementById("accnone"); - if (accnone_element == null) { - console.log("Failed to find accnone element"); - return; - } - if (osname == "macos") { - accnone_element.children[0].textContent = "Default"; - } else { - accnone_element.children[0].textContent = "CPU"; - } -} - -function selectedOption(option, selection, category) { - $(option).removeClass("selected"); - $(selection).addClass("selected"); - opts[category] = selection.id; - if (category === "pm") { - var elements = document.getElementsByClassName("language")[0].children; - if (selection.id !== "libtorch" && elements["cplusplus"].classList.contains("selected")) { - $(elements["cplusplus"]).removeClass("selected"); - $(elements["python"]).addClass("selected"); - opts["language"] = "python"; - } else if (selection.id == "libtorch") { - for (var i = 0; i < elements.length; i++) { - if (elements[i].id === "cplusplus") { - $(elements[i]).addClass("selected"); - opts["language"] = "cplusplus"; - } else { - $(elements[i]).removeClass("selected"); - } - } - } - } else if (category === "language") { - var elements = document.getElementsByClassName("package")[0].children; - if (selection.id !== "cplusplus" && elements["libtorch"].classList.contains("selected")) { - $(elements["libtorch"]).removeClass("selected"); - $(elements["pip"]).addClass("selected"); - opts["pm"] = "pip"; - } else if (selection.id == "cplusplus") { - for (var i = 0; i < elements.length; i++) { - if (elements[i].id === "libtorch") { - $(elements[i]).addClass("selected"); - opts["pm"] = "libtorch"; - } else { - $(elements[i]).removeClass("selected"); - } - } - } - } else if (category == "ptbuild") { - changeVersion(opts.ptbuild); - //make sure unsupported platforms are disabled - disableUnsupportedPlatforms(opts.os); - } - commandMessage(buildMatcher()); - if (category === "os") { - disableUnsupportedPlatforms(opts.os); - display(opts.os, 'installation', 'os'); - } - changeAccNoneName(opts.os); -} - -function display(selection, id, category) { - var container = document.getElementById(id); - // Check if there's a container to display the selection - if (container === null) { - return; - } - var elements = container.getElementsByClassName(category); - for (var i = 0; i < elements.length; i++) { - if (elements[i].classList.contains(selection)) { - $(elements[i]).addClass("selected"); - } else { - $(elements[i]).removeClass("selected"); - } - } -} - -function buildMatcher() { - return ( - opts.ptbuild.toLowerCase() + - "," + - 
opts.pm.toLowerCase() + - "," + - opts.os.toLowerCase() + - "," + - opts.cuda.toLowerCase() + - "," + - opts.language.toLowerCase() - ); -} - -// Cloud Partners sub-menu toggle listeners -$("[data-toggle='cloud-dropdown']").on("click", function(e) { - if ($(this).hasClass("open")) { - $(this).removeClass("open"); - // If you deselect a current drop-down item, don't display it's info any longer - display(null, 'cloud', 'platform'); - } else { - $("[data-toggle='cloud-dropdown'].open").removeClass("open"); - $(this).addClass("open"); - var cls = $(this).find(".cloud-option-body")[0].className; - for (var i = 0; i < supportedCloudPlatforms.length; i++) { - if (cls.includes(supportedCloudPlatforms[i])) { - display(supportedCloudPlatforms[i], 'cloud', 'platform'); - } - } - } -}); - -function commandMessage(key) { - var object = {{ installMatrix }}; - - if (!object.hasOwnProperty(key)) { - $("#command").html( - "
         # Follow instructions at this URL: https://github.com/pytorch/pytorch#from-source 
        " - ); - } else if (key.indexOf("lts") == 0 && key.indexOf('rocm') < 0) { - $("#command").html("
        " + object[key] + "
        "); - } else { - $("#command").html("
        " + object[key] + "
        "); - } -} - -// Set cuda version right away -changeVersion("stable") diff --git a/_includes/quick_start_cloud_options.html b/_includes/quick_start_cloud_options.html deleted file mode 100644 index 5951f7b71002..000000000000 --- a/_includes/quick_start_cloud_options.html +++ /dev/null @@ -1,58 +0,0 @@ -
        - - -
        -
        -
        - Google Cloud Platform -
        - - - - - -
        -
        - -
        -
        -
        -

        Microsoft Azure

        -
        - - -
        -
        - -
        -
        -
        - Lightning Studios -
        - -
        -
        -
        diff --git a/_includes/quick_start_local.html b/_includes/quick_start_local.html deleted file mode 100644 index 81bd69fbf1d4..000000000000 --- a/_includes/quick_start_local.html +++ /dev/null @@ -1,112 +0,0 @@ -

        Select your preferences and run the install command. Stable represents the most currently tested and supported version of PyTorch. This should - be suitable for many users. Preview is available if you want the latest, not fully tested and supported, builds that are generated nightly. - Please ensure that you have met the prerequisites below (e.g., numpy), depending on your package manager. You can also - install previous versions of PyTorch. Note that LibTorch is only available for C++. -

        - -

        NOTE: Latest PyTorch requires Python 3.9 or later.

        - -
        -
        -
        -
        PyTorch Build
        -
        -
        -
        Your OS
        -
        -
        -
        Package
        -
        -
        -
        Language
        -
        -
        -
        Compute Platform
        -
        -
        -
        Run this Command:
        -
        -
        - -
        -
        -
        -
        PyTorch Build
        -
        -
        -
        Stable (1.13.0)
        -
        -
        -
        Preview (Nightly)
        -
        -
        -
        -
        -
        Your OS
        -
        -
        -
        Linux
        -
        -
        -
        Mac
        -
        -
        -
        Windows
        -
        -
        -
        -
        -
        Package
        -
        -
        -
        Pip
        -
        -
        -
        LibTorch
        -
        -
        -
        Source
        -
        -
        -
        -
        -
        Language
        -
        -
        -
        Python
        -
        -
        -
        C++ / Java
        -
        -
        -
        -
        -
        Compute Platform
        -
        -
        -
        CUDA 11.8
        -
        -
        -
        CUDA 12.1
        -
        -
        -
        CUDA 12.4
        -
        -
        -
        ROCm 5.2
        -
        -
        -
        CPU
        -
        -
        -
        -
        -
        Run this Command:
        -
        -
        -
        pip install torch torchvision
        -
        -
        -
        -
        -
        diff --git a/_includes/quick_start_module.html b/_includes/quick_start_module.html deleted file mode 100644 index 7fabcb5c55f3..000000000000 --- a/_includes/quick_start_module.html +++ /dev/null @@ -1,26 +0,0 @@ -
        -
        -
        -
        - -

        Install PyTorch

        - - {% include quick_start_local.html %} - - - Previous versions of PyTorch - -
        - -
        -

        Quick Start With
        Cloud Partners

        - -

        Get up and running with PyTorch quickly through popular cloud platforms and machine learning services.

        - - {% include quick_start_cloud_options.html %} -
        -
        -
        -
        - - diff --git a/_includes/research.html b/_includes/research.html deleted file mode 100644 index 220864e26d9a..000000000000 --- a/_includes/research.html +++ /dev/null @@ -1,186 +0,0 @@ - diff --git a/_includes/research_side_nav.html b/_includes/research_side_nav.html deleted file mode 100644 index a76229dc104e..000000000000 --- a/_includes/research_side_nav.html +++ /dev/null @@ -1,65 +0,0 @@ - - - - diff --git a/_includes/sample_code_block.html b/_includes/sample_code_block.html deleted file mode 100644 index ffd0b1e03039..000000000000 --- a/_includes/sample_code_block.html +++ /dev/null @@ -1,12 +0,0 @@ -{% highlight python %} -#!/usr/bin/python3 - -# Simple while loop -a = 0 -while a < 15: - print(a, end=' ') - if a == 10: - print("made it to ten!!") - a = a + 1 -print() -{% endhighlight %} diff --git a/_includes/similar_posts_module.html b/_includes/similar_posts_module.html deleted file mode 100644 index 8c227730aa3e..000000000000 --- a/_includes/similar_posts_module.html +++ /dev/null @@ -1,40 +0,0 @@ -{% assign current_category = page.category.first %} - -{% if current_category != nil %} - -
        -
        - - - -
        - {% for category in current_category %} - {% for post in site.categories[category] limit: 3 %} - {% if post.url != page.url %} - -
        -

        {{ post.date | date: '%B %d, %Y' }}

        -

        - {{ post.title }} -

        -

        {{ post.excerpt | remove: '

        ' | remove: '

        ' | truncate: 150 }}

        -
        - {% endif %} - {% endfor %} - {% endfor %} -
        -
        -
        - -{% endif %} diff --git a/_includes/svgs/pytorch-language.svg b/_includes/svgs/pytorch-language.svg deleted file mode 100644 index da71062e1374..000000000000 --- a/_includes/svgs/pytorch-language.svg +++ /dev/null @@ -1,30 +0,0 @@ - - - - Group 5 - Created with Sketch. - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/_includes/tag_manager_script.html b/_includes/tag_manager_script.html deleted file mode 100644 index 1ba3f99cdea9..000000000000 --- a/_includes/tag_manager_script.html +++ /dev/null @@ -1,4 +0,0 @@ - - - diff --git a/_includes/twitter_pixel.html b/_includes/twitter_pixel.html deleted file mode 100644 index 3633c20fe253..000000000000 --- a/_includes/twitter_pixel.html +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/_includes/upcoming-live-events.html b/_includes/upcoming-live-events.html deleted file mode 100644 index d0d0bd02a865..000000000000 --- a/_includes/upcoming-live-events.html +++ /dev/null @@ -1,22 +0,0 @@ -

        Upcoming Events

        -{% assign events = site.events | where: "category", "event" | sort_natural: "date" %} -{% capture now %}{{'now' | date: '%s' | plus: 0 %}}{% endcapture %} -{% for item in events %} - {% capture date %}{{item.date | date: '%s' | plus: 0 %}}{% endcapture %} - {% if date >= now %} -
        - {% assign events = site.events %} - {% capture date %}{{item.date | date: '%s' | plus: 0 %}}{% endcapture %} -
        -

        {{ item.title }}

        - {% if item.header-image And item.header-image != "" And item.header-image != nil %} - - {% endif %} -

        {{ item.content | markdownify }}

        - {% if item.video %} - {% include live_event_video.html %} - {% endif %} -
        -
        - {% endif %} -{% endfor %} diff --git a/_includes/upcoming_episodes.html b/_includes/upcoming_episodes.html deleted file mode 100644 index 6a2fc529d8e2..000000000000 --- a/_includes/upcoming_episodes.html +++ /dev/null @@ -1,30 +0,0 @@ - diff --git a/_layouts/blog.html b/_layouts/blog.html deleted file mode 100644 index ca0613176c02..000000000000 --- a/_layouts/blog.html +++ /dev/null @@ -1,71 +0,0 @@ - - -{% include head.html %} - - - {% include tag_manager_script.html %} - {% include header.html %} - -
        - - {% assign posts = paginator.posts %} - {% assign display_post_categories = site.posts | map: 'categories' | join: ',' | replace: '-', ' ' | split: ',' | uniq | sort %} - {% assign current_page = page.url | downcase | remove: ".html" | split: '/' %} - {% assign post_categories = site.posts | map: 'categories' | join: ',' | split: ',' | uniq | sort %} - -
        -
        - {% for post in posts limit:1 %} -

        Featured Post

        -

        - {{ post.title }} -

        - - - - Read More - - - {% endfor %} -
        -
        - -
        -
        -
        -
        - - {% for post in posts %} - - {% if forloop.first %} - {% continue %} - {% endif %} - -
        -
        -

        {{ post.date | date: '%B %d, %Y' }}

        -

        - {{ post.title }} -

        -

        {{ post.excerpt | strip_html | truncate: 500}}

        - -
        - - Read More - -
        - {% endfor %} -
        - - {% include pagination_buttons.html %} -
        -
        -
        - - {% include quick_start_module.html %} - - {% include footer.html %} - - - - diff --git a/_layouts/blog_detail.html b/_layouts/blog_detail.html deleted file mode 100644 index eb80011a163b..000000000000 --- a/_layouts/blog_detail.html +++ /dev/null @@ -1,52 +0,0 @@ - - -{% include head.html %} - - - {% include tag_manager_script.html %} -
        -
        -
        - Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. -
        -
        - -
        -
        - {% include nav.html %} -
        -
        - -
        -
        -

        {{ page.date | date: '%B %d, %Y' }}

        -

        - {{ page.title }} -

        -
        -
        - -
        -
        -
        - -
        -

        - by - {% if page.author %} - {{ page.author }} - {% else %} - {{ site.default_author }} - {% endif %} -

        - {{ content }} -
        -
        -
        -
        - - - {% include footer.html %} - - - diff --git a/_layouts/deep_learning.html b/_layouts/deep_learning.html deleted file mode 100644 index bb9680d24a22..000000000000 --- a/_layouts/deep_learning.html +++ /dev/null @@ -1,52 +0,0 @@ - - - {% include head.html %} -
        -
        -
        - -
        -
        -
        - - - {% include tag_manager_script.html %} -
        - -
        -
        -
        -
        -

        Deep Learning
        with PyTorch

        - {% if page.deep-learning-landing == true %} -

        Download a free copy of the full book and learn how to get started with AI / ML development using PyTorch

        - {% else %} -

        Thanks for requesting a copy of the Deep Learning with PyTorch book! - Click here to download the book. -

        - {% endif %} -
        -
        - -
        -
        -
        -
        - -
        -
        -
        - - {{ content }} - -
        -
        -
        - - {% include footer.html %} - - {% if page.deep-learning-landing == false and jekyll.environment == 'production' %} - {% include deep_learning_event_tracking.html %} - {% endif %} - - diff --git a/_layouts/default.html b/_layouts/default.html deleted file mode 100644 index a652d10f1b78..000000000000 --- a/_layouts/default.html +++ /dev/null @@ -1,14 +0,0 @@ - - - {% include head.html %} - - {% include tag_manager_script.html %} - {% include header.html %} - -
        - - {{ content }} - - {% include footer.html %} - - diff --git a/_layouts/docs_redirect.html b/_layouts/docs_redirect.html deleted file mode 100755 index bbf46b1847cc..000000000000 --- a/_layouts/docs_redirect.html +++ /dev/null @@ -1,17 +0,0 @@ - - - - - - - Page Redirection - - - {% include tag_manager_script.html %} - If you are not redirected automatically, follow this link to the latest documentation. -
        - If you want to view documentation for a particular version, follow this link. - - diff --git a/_layouts/ecosystem_detail.html b/_layouts/ecosystem_detail.html deleted file mode 100644 index 295718b58280..000000000000 --- a/_layouts/ecosystem_detail.html +++ /dev/null @@ -1,69 +0,0 @@ - - - {% include head.html %} - - {% include tag_manager_script.html %} - {% include header.html %} - -
        - -
        -
        -

        {{ page.title }}

        - - {% include svgs/pytorch-language.svg %} - -

        {{ page.summary }}

        - - - {{ page.call-to-action }} - -
        -
        - -
        -
        -
        -
        - {{ content }} -
        -
        - -
        -
        -
        -
        -

        Similar Projects

        - - - See all Projects - -
        -
        - -
        - {% for item in site.ecosystem limit:3 %} - - {% endfor %} -
        -
        -
        -
        -
        - - {% include footer.html %} - - diff --git a/_layouts/general.html b/_layouts/general.html deleted file mode 100644 index 0a873f21244d..000000000000 --- a/_layouts/general.html +++ /dev/null @@ -1,25 +0,0 @@ - - -{% include head.html %} - - - {% include tag_manager_script.html %} -
        -
        - {% include nav.html %} -
        -
        - -
        -
        -
        -
        - {{ content }} -
        -
        -
        -
        - - {% include footer.html %} - - diff --git a/_layouts/get_started.html b/_layouts/get_started.html deleted file mode 100644 index 66c887cf42e4..000000000000 --- a/_layouts/get_started.html +++ /dev/null @@ -1,61 +0,0 @@ - - - {% include head.html %} - - {% include tag_manager_script.html %} {% include header.html %} - -
        - - {% assign items = site.get_started | where: "published",true | sort: "order" - %} - -
        -
        -

        Get Started

        - -

        - Select preferences and run the command to install PyTorch locally, or - get started quickly with one of the supported cloud platforms. -

        -
        -
        - -
        -
        - - -
        - {% if page.get-started-locally == true %} {% include - get_started_locally.html %} {% elsif page.order == 2 %} {% include - pytorch.html %} {% elsif page.get-started-via-cloud == true %} {% - include get_started_via_cloud.html %} {% else %} - -
        -
        {{ content }}
        -
        - - {% endif %} -
        -
        -
        - - {% include footer.html %} - - diff --git a/_layouts/hub_detail.html b/_layouts/hub_detail.html deleted file mode 100644 index 0adaa77d299a..000000000000 --- a/_layouts/hub_detail.html +++ /dev/null @@ -1,69 +0,0 @@ - - - {% include head.html %} - - {% include tag_manager_script.html %} - {% include header.html %} - -
        - -
        -
        - - {% if page.category == 'researchers' %} - < - {% else %} - < - {% endif %} - -

        - {{ page.title }} -

        - -
        -
        -

        By {{ page.author }}

        -
        - -
        -

        {{ page.summary }}

        -
        - - - {% if page.demo-model-link %} - {% if page.demo-model-button-text == blank or page.demo-model-button-text == nil %} - - {% else %} - - {% endif %} - {% endif %} -
        -
        -
        -
        -
        - -
        -
        -
        -
        -
        - - -
        -
        - -
        -
        -
        -
        -
        - - {% include footer.html %} - - - - diff --git a/_layouts/hub_index.html b/_layouts/hub_index.html deleted file mode 100644 index 989bd0fa94ed..000000000000 --- a/_layouts/hub_index.html +++ /dev/null @@ -1,51 +0,0 @@ - - - {% include head.html %} - - {% include tag_manager_script.html %} - {% include header.html %} - -
        - -
        -
        -

        - PyTorch Hub
        - {{ page.title }} -

        - -

        {{ page.summary }}

        -
        -
        - -
        -
        -
        - -
        -
        - {{content}} -
        -
        -
        -
        -
        - - {% include footer.html %} - - - - - - - -{% if page.compact == true %} - -
        - -{% else %} -
        - -{% endif %} - - diff --git a/_layouts/marketo_email.html b/_layouts/marketo_email.html deleted file mode 100644 index 0a873f21244d..000000000000 --- a/_layouts/marketo_email.html +++ /dev/null @@ -1,25 +0,0 @@ - - -{% include head.html %} - - - {% include tag_manager_script.html %} -
        -
        - {% include nav.html %} -
        -
        - -
        -
        -
        -
        - {{ content }} -
        -
        -
        -
        - - {% include footer.html %} - - diff --git a/_layouts/mobile.html b/_layouts/mobile.html deleted file mode 100644 index 840a42b5f479..000000000000 --- a/_layouts/mobile.html +++ /dev/null @@ -1,57 +0,0 @@ - - - {% include head.html %} - - {% include tag_manager_script.html %} - {% include header.html %} - -
        - - {% assign mobile_items = site.mobile | where: "published",true | sort: "order" %} - -
        -
        -

        PyTorch Mobile

        - -

        End-to-end workflow from Training to Deployment for iOS and Android mobile devices

        -
        -
        - -
        -
        - - -
        -
        -
        - {% include mobile_page_side_nav.html %} -
        -
        -
        -
        - {{ content }} -
        -
        -
        -
        -
        -
        -
        - - {% include footer.html %} - - - - diff --git a/_mobile/android.md b/_mobile/android.md deleted file mode 100644 index 0acaeb333138..000000000000 --- a/_mobile/android.md +++ /dev/null @@ -1,431 +0,0 @@ ---- -layout: mobile -title: Android -permalink: /mobile/android/ -background-class: mobile-background -body-class: mobile -order: 3 -published: true ---- - -
        -

        Note

        -

        PyTorch Mobile is no longer actively supported. Please check out ExecuTorch, PyTorch’s all-new on-device inference library. You can also review this page to learn more about how to use ExecuTorch to build an Android app.

        -
        - -# Android - -## Quickstart with a HelloWorld Example - -[HelloWorld](https://github.com/pytorch/android-demo-app/tree/master/HelloWorldApp) is a simple image classification application that demonstrates how to use PyTorch Android API. -This application runs TorchScript serialized TorchVision pretrained resnet18 model on static image which is packaged inside the app as android asset. - -#### 1. Model Preparation - -Let’s start with model preparation. If you are familiar with PyTorch, you probably should already know how to train and save your model. In case you don’t, we are going to use a pre-trained image classification model ([MobileNetV2](https://pytorch.org/hub/pytorch_vision_mobilenet_v2/)). -To install it, run the command below: -``` -pip install torchvision -``` - -To serialize the model you can use python [script](https://github.com/pytorch/android-demo-app/blob/master/HelloWorldApp/trace_model.py) in the root folder of HelloWorld app: -``` -import torch -import torchvision -from torch.utils.mobile_optimizer import optimize_for_mobile - -model = torchvision.models.mobilenet_v2(pretrained=True) -model.eval() -example = torch.rand(1, 3, 224, 224) -traced_script_module = torch.jit.trace(model, example) -traced_script_module_optimized = optimize_for_mobile(traced_script_module) -traced_script_module_optimized._save_for_lite_interpreter("app/src/main/assets/model.ptl") - -``` -If everything works well, we should have our model - `model.ptl` generated in the assets folder of android application. -That will be packaged inside android application as `asset` and can be used on the device. - -More details about TorchScript you can find in [tutorials on pytorch.org](https://pytorch.org/docs/stable/jit.html) - -#### 2. Cloning from github -``` -git clone https://github.com/pytorch/android-demo-app.git -cd HelloWorldApp -``` -If [Android SDK](https://developer.android.com/studio/index.html#command-tools) and [Android NDK](https://developer.android.com/ndk/downloads) are already installed you can install this application to the connected android device or emulator with: -``` -./gradlew installDebug -``` - -We recommend you to open this project in [Android Studio 3.5.1+](https://developer.android.com/studio). At the moment PyTorch Android and demo applications use [android gradle plugin of version 3.5.0](https://developer.android.com/studio/releases/gradle-plugin#3-5-0), which is supported only by Android Studio version 3.5.1 and higher. -Using Android Studio you will be able to install Android NDK and Android SDK with Android Studio UI. - -#### 3. Gradle dependencies - -Pytorch android is added to the HelloWorld as [gradle dependencies](https://github.com/pytorch/android-demo-app/blob/master/HelloWorldApp/app/build.gradle#L28-L29) in build.gradle: - -``` -repositories { - jcenter() -} - -dependencies { - implementation 'org.pytorch:pytorch_android_lite:1.9.0' - implementation 'org.pytorch:pytorch_android_torchvision:1.9.0' -} -``` -Where `org.pytorch:pytorch_android` is the main dependency with PyTorch Android API, including libtorch native library for all 4 android abis (armeabi-v7a, arm64-v8a, x86, x86_64). -Further in this doc you can find how to rebuild it only for specific list of android abis. - -`org.pytorch:pytorch_android_torchvision` - additional library with utility functions for converting `android.media.Image` and `android.graphics.Bitmap` to tensors. - -#### 4. 
Reading image from Android Asset - -All the logic happens in [`org.pytorch.helloworld.MainActivity`](https://github.com/pytorch/android-demo-app/blob/master/HelloWorldApp/app/src/main/java/org/pytorch/helloworld/MainActivity.java#L31-L69). -As a first step we read `image.jpg` to `android.graphics.Bitmap` using the standard Android API. -``` -Bitmap bitmap = BitmapFactory.decodeStream(getAssets().open("image.jpg")); -``` - -#### 5. Loading Mobile Module -``` -Module module = Module.load(assetFilePath(this, "model.ptl")); -``` -`org.pytorch.Module` represents `torch::jit::mobile::Module` that can be loaded with `load` method specifying file path to the serialized to file model. - -#### 6. Preparing Input -``` -Tensor inputTensor = TensorImageUtils.bitmapToFloat32Tensor(bitmap, - TensorImageUtils.TORCHVISION_NORM_MEAN_RGB, TensorImageUtils.TORCHVISION_NORM_STD_RGB); -``` -`org.pytorch.torchvision.TensorImageUtils` is part of `org.pytorch:pytorch_android_torchvision` library. -The `TensorImageUtils#bitmapToFloat32Tensor` method creates tensors in the [torchvision format](https://pytorch.org/vision/stable/models.html) using `android.graphics.Bitmap` as a source. - -> All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. -> The images have to be loaded in to a range of `[0, 1]` and then normalized using `mean = [0.485, 0.456, 0.406]` and `std = [0.229, 0.224, 0.225]` - -`inputTensor`'s shape is `1x3xHxW`, where `H` and `W` are bitmap height and width appropriately. - -#### 7. Run Inference - -``` -Tensor outputTensor = module.forward(IValue.from(inputTensor)).toTensor(); -float[] scores = outputTensor.getDataAsFloatArray(); -``` - -`org.pytorch.Module.forward` method runs loaded module's `forward` method and gets result as `org.pytorch.Tensor` outputTensor with shape `1x1000`. - -#### 8. Processing results -Its content is retrieved using `org.pytorch.Tensor.getDataAsFloatArray()` method that returns java array of floats with scores for every image net class. - -After that we just find index with maximum score and retrieve predicted class name from `ImageNetClasses.IMAGENET_CLASSES` array that contains all ImageNet classes. - -``` -float maxScore = -Float.MAX_VALUE; -int maxScoreIdx = -1; -for (int i = 0; i < scores.length; i++) { - if (scores[i] > maxScore) { - maxScore = scores[i]; - maxScoreIdx = i; - } -} -String className = ImageNetClasses.IMAGENET_CLASSES[maxScoreIdx]; -``` - -In the following sections you can find detailed explanations of PyTorch Android API, code walk through for a bigger [demo application](https://github.com/pytorch/android-demo-app/tree/master/PyTorchDemoApp), -implementation details of the API, how to customize and build it from source. - -## PyTorch Demo Application - -We have also created another more complex PyTorch Android demo application that does image classification from camera output and text classification in the [same github repo](https://github.com/pytorch/android-demo-app/tree/master/PyTorchDemoApp). - -To get device camera output it uses [Android CameraX API](https://developer.android.com/training/camerax -). -All the logic that works with CameraX is separated to [`org.pytorch.demo.vision.AbstractCameraXActivity`](https://github.com/pytorch/android-demo-app/blob/master/PyTorchDemoApp/app/src/main/java/org/pytorch/demo/vision/AbstractCameraXActivity.java) class. 
- - -``` -void setupCameraX() { - final PreviewConfig previewConfig = new PreviewConfig.Builder().build(); - final Preview preview = new Preview(previewConfig); - preview.setOnPreviewOutputUpdateListener(output -> mTextureView.setSurfaceTexture(output.getSurfaceTexture())); - - final ImageAnalysisConfig imageAnalysisConfig = - new ImageAnalysisConfig.Builder() - .setTargetResolution(new Size(224, 224)) - .setCallbackHandler(mBackgroundHandler) - .setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE) - .build(); - final ImageAnalysis imageAnalysis = new ImageAnalysis(imageAnalysisConfig); - imageAnalysis.setAnalyzer( - (image, rotationDegrees) -> { - analyzeImage(image, rotationDegrees); - }); - - CameraX.bindToLifecycle(this, preview, imageAnalysis); - } - - void analyzeImage(android.media.Image, int rotationDegrees) -``` - -Where the `analyzeImage` method process the camera output, `android.media.Image`. - -It uses the aforementioned [`TensorImageUtils.imageYUV420CenterCropToFloat32Tensor`](https://github.com/pytorch/pytorch/blob/master/android/pytorch_android_torchvision/src/main/java/org/pytorch/torchvision/TensorImageUtils.java#L90) method to convert `android.media.Image` in `YUV420` format to input tensor. - -After getting predicted scores from the model it finds top K classes with the highest scores and shows on the UI. - -#### Language Processing Example - -Another example is natural language processing, based on an LSTM model, trained on a reddit comments dataset. -The logic happens in [`TextClassificattionActivity`](https://github.com/pytorch/android-demo-app/blob/master/PyTorchDemoApp/app/src/main/java/org/pytorch/demo/nlp/TextClassificationActivity.java). - -Result class names are packaged inside the TorchScript model and initialized just after initial module initialization. -The module has a `get_classes` method that returns `List[str]`, which can be called using method `Module.runMethod(methodName)`: -``` - mModule = Module.load(moduleFileAbsoluteFilePath); - IValue getClassesOutput = mModule.runMethod("get_classes"); -``` -The returned `IValue` can be converted to java array of `IValue` using `IValue.toList()` and processed to an array of strings using `IValue.toStr()`: -``` - IValue[] classesListIValue = getClassesOutput.toList(); - String[] moduleClasses = new String[classesListIValue.length]; - int i = 0; - for (IValue iv : classesListIValue) { - moduleClasses[i++] = iv.toStr(); - } -``` - -Entered text is converted to java array of bytes with `UTF-8` encoding. `Tensor.fromBlobUnsigned` creates tensor of `dtype=uint8` from that array of bytes. -``` - byte[] bytes = text.getBytes(Charset.forName("UTF-8")); - final long[] shape = new long[]{1, bytes.length}; - final Tensor inputTensor = Tensor.fromBlobUnsigned(bytes, shape); -``` - -Running inference of the model is similar to previous examples: -``` -Tensor outputTensor = mModule.forward(IValue.from(inputTensor)).toTensor() -``` - -After that, the code processes the output, finding classes with the highest scores. - -## More PyTorch Android Demo Apps - -### D2go - -[D2Go](https://github.com/pytorch/android-demo-app/tree/master/D2Go) demonstrates a Python script that creates the much lighter and much faster Facebook [D2Go](https://github.com/facebookresearch/d2go) model that is powered by PyTorch 1.8, torchvision 0.9, and Detectron2 with built-in SOTA networks for mobile, and an Android app that uses it to detect objects from pictures in your photos, taken with camera, or with live camera. 
This demo app also shows how to use the native pre-built torchvision-ops library. - -### Image Segmentation - -[Image Segmentation](https://github.com/pytorch/android-demo-app/tree/master/ImageSegmentation) demonstrates a Python script that converts the PyTorch [DeepLabV3](https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101/) model and an Android app that uses the model to segment images. - -### Object Detection - -[Object Detection](https://github.com/pytorch/android-demo-app/tree/master/ObjectDetection) demonstrates how to convert the popular [YOLOv5](https://pytorch.org/hub/ultralytics_yolov5/) model and use it in an Android app that detects objects from pictures in your photos, taken with camera, or with live camera. - -### Neural Machine Translation - -[Neural Machine Translation](https://github.com/pytorch/android-demo-app/tree/master/Seq2SeqNMT) demonstrates how to convert a sequence-to-sequence neural machine translation model trained with the code in the [PyTorch NMT tutorial](https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html) and use the model in an Android app to do French-English translation. - -### Question Answering - -[Question Answering](https://github.com/pytorch/android-demo-app/tree/master/QuestionAnswering) demonstrates how to convert a powerful transformer QA model and use the model in an Android app to answer questions about PyTorch Mobile and more. - -### Vision Transformer - -[Vision Transformer](https://github.com/pytorch/android-demo-app/tree/master/ViT4MNIST) demonstrates how to use Facebook's latest Vision Transformer [DeiT](https://github.com/facebookresearch/deit) model to do image classification, and how convert another Vision Transformer model and use it in an Android app to perform handwritten digit recognition. - -### Speech recognition - -[Speech Recognition](https://github.com/pytorch/android-demo-app/tree/master/SpeechRecognition) demonstrates how to convert Facebook AI's wav2vec 2.0, one of the leading models in speech recognition, to TorchScript and how to use the scripted model in an Android app to perform speech recognition. - -### Video Classification - -[TorchVideo](https://github.com/pytorch/android-demo-app/tree/master/TorchVideo) demonstrates how to use a pre-trained video classification model, available at the newly released [PyTorchVideo](https://github.com/facebookresearch/pytorchvideo), on Android to see video classification results, updated per second while the video plays, on tested videos, videos from the Photos library, or even real-time videos. - - -## PyTorch Android Tutorial and Recipes - -### [Image Segmentation DeepLabV3 on Android](https://pytorch.org/tutorials/beginner/deeplabv3_on_android.html) - -A comprehensive step-by-step tutorial on how to prepare and run the PyTorch DeepLabV3 image segmentation model on Android. - -### [PyTorch Mobile Performance Recipes](https://pytorch.org/tutorials/recipes/mobile_perf.html) - -List of recipes for performance optimizations for using PyTorch on Mobile. - -### [Making Android Native Application That Uses PyTorch Android Prebuilt Libraries](https://pytorch.org/tutorials/recipes/android_native_app_with_custom_op.html) - -Learn how to make Android application from the scratch that uses LibTorch C++ API and uses TorchScript model with custom C++ operator. - -### [Fuse Modules recipe](https://pytorch.org/tutorials/recipes/fuse.html) - -Learn how to fuse a list of PyTorch modules into a single module to reduce the model size before quantization. 
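To make the fusion step a bit more concrete, here is a minimal sketch (assuming a torchvision ResNet18, whose top-level submodules are named `conv1`, `bn1` and `relu`; see the recipe above for the full workflow):

```python
import torch
import torchvision

# Minimal fusion sketch: fold conv + batchnorm + relu into a single module
# before quantization. The submodule names below match torchvision's ResNet18.
model = torchvision.models.resnet18(pretrained=True)
model.eval()  # fuse for inference with the model in eval mode
fused_model = torch.quantization.fuse_modules(model, [["conv1", "bn1", "relu"]])
```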
- -### [Quantization for Mobile Recipe](https://pytorch.org/tutorials/recipes/quantization.html) - -Learn how to reduce the model size and make it run faster without losing much on accuracy. - -### [Script and Optimize for Mobile](https://pytorch.org/tutorials/recipes/script_optimized.html) - -Learn how to convert the model to TorchScipt and (optional) optimize it for mobile apps. - -### [Model Preparation for Android Recipe](https://pytorch.org/tutorials/recipes/model_preparation_android.html) - -Learn how to add the model in an Android project and use the PyTorch library for Android. - -## Building PyTorch Android from Source - -In some cases you might want to use a local build of PyTorch android, for example you may build custom LibTorch binary with another set of operators or to make local changes, or try out the latest PyTorch code. - -For this you can use `./scripts/build_pytorch_android.sh` script. -``` -git clone https://github.com/pytorch/pytorch.git -cd pytorch -sh ./scripts/build_pytorch_android.sh -``` - -The workflow contains several steps: - -1\. Build libtorch for android for all 4 android abis (armeabi-v7a, arm64-v8a, x86, x86_64) - -2\. Create symbolic links to the results of those builds: -`android/pytorch_android/src/main/jniLibs/${abi}` to the directory with output libraries -`android/pytorch_android/src/main/cpp/libtorch_include/${abi}` to the directory with headers. These directories are used to build `libpytorch_jni.so` library, as part of the `pytorch_android-release.aar` bundle, that will be loaded on android device. - -3\. And finally run `gradle` in `android/pytorch_android` directory with task `assembleRelease` - -Script requires that Android SDK, Android NDK, Java SDK, and gradle are installed. -They are specified as environment variables: - -`ANDROID_HOME` - path to [Android SDK](https://developer.android.com/studio/command-line/sdkmanager.html) - -`ANDROID_NDK` - path to [Android NDK](https://developer.android.com/studio/projects/install-ndk). It's recommended to use NDK 21.x. - -`GRADLE_HOME` - path to [gradle](https://gradle.org/releases/) - -`JAVA_HOME` - path to [JAVA JDK](https://www.oracle.com/java/technologies/javase-downloads.html#javasejdk) - - -After successful build, you should see the result as aar file: - -``` -$ find android -type f -name *aar -android/pytorch_android/build/outputs/aar/pytorch_android-release.aar -android/pytorch_android_torchvision/build/outputs/aar/pytorch_android_torchvision-release.aar -``` - -## Using the PyTorch Android Libraries Built from Source or Nightly - -First add the two aar files built above, or downloaded from the nightly built PyTorch Android repos at [here](https://oss.sonatype.org/#nexus-search;quick~pytorch_android) and [here](https://oss.sonatype.org/#nexus-search;quick~torchvision_android), to the Android project's `lib` folder, then add in the project's app `build.gradle` file: -``` -allprojects { - repositories { - flatDir { - dirs 'libs' - } - } -} - -dependencies { - - // if using the libraries built from source - implementation(name:'pytorch_android-release', ext:'aar') - implementation(name:'pytorch_android_torchvision-release', ext:'aar') - - // if using the nightly built libraries downloaded above, for example the 1.8.0-snapshot on Jan. 21, 2021 - // implementation(name:'pytorch_android-1.8.0-20210121.092759-172', ext:'aar') - // implementation(name:'pytorch_android_torchvision-1.8.0-20210121.092817-173', ext:'aar') - - ... 
- implementation 'com.android.support:appcompat-v7:28.0.0' - implementation 'com.facebook.fbjni:fbjni-java-only:0.0.3' -} -``` - -Also we have to add all transitive dependencies of our aars. As `pytorch_android` depends on `com.android.support:appcompat-v7:28.0.0` or `androidx.appcompat:appcompat:1.2.0`, we need to one of them. (In case of using maven dependencies they are added automatically from `pom.xml`). - -## Using the Nightly PyTorch Android Libraries - -Other than using the aar files built from source or downloaded from the links in the previous section, you can also use the nightly built Android PyTorch and TorchVision libraries by adding in your app `build.gradle` file the maven url and the nightly libraries implementation as follows: - -``` -repositories { - maven { - url "https://oss.sonatype.org/content/repositories/snapshots" - } -} - -dependencies { - ... - implementation 'org.pytorch:pytorch_android:1.8.0-SNAPSHOT' - implementation 'org.pytorch:pytorch_android_torchvision:1.8.0-SNAPSHOT' -} -``` - -This is the easiest way to try out the latest PyTorch code and the Android libraries, if you do not need to make any local changes. But be aware you may need to build the model used on mobile in the latest PyTorch - using either the latest PyTorch code or a quick nightly install with commands like `pip install --pre torch torchvision -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html` - to avoid possible model version mismatch errors when running the model on mobile. - -## Custom Build - -To reduce the size of binaries you can do custom build of PyTorch Android with only set of operators required by your model. -This includes two steps: preparing the list of operators from your model, rebuilding pytorch android with specified list. - -1\. Verify your PyTorch version is 1.4.0 or above. You can do that by checking the value of `torch.__version__`. - -2\. Preparation of the list of operators - -List of operators of your serialized torchscript model can be prepared in yaml format using python api function `torch.jit.export_opnames()`. -To dump the operators in your model, say `MobileNetV2`, run the following lines of Python code: -``` -# Dump list of operators used by MobileNetV2: -import torch, yaml -model = torch.jit.load('MobileNetV2.pt') -ops = torch.jit.export_opnames(model) -with open('MobileNetV2.yaml', 'w') as output: - yaml.dump(ops, output) -``` -3\. Building PyTorch Android with prepared operators list. - -To build PyTorch Android with the prepared yaml list of operators, specify it in the environment variable `SELECTED_OP_LIST`. Also in the arguments, specify which Android ABIs it should build; by default it builds all 4 Android ABIs. - -``` -# Build PyTorch Android library customized for MobileNetV2: -SELECTED_OP_LIST=MobileNetV2.yaml scripts/build_pytorch_android.sh arm64-v8a -``` - -After successful build you can integrate the result aar files to your android gradle project, following the steps from previous section of this tutorial (Building PyTorch Android from Source). - -## Use PyTorch JIT interpreter - -PyTorch JIT interpreter is the default interpreter before 1.9 (a version of our PyTorch interpreter that is not as size-efficient). 
It will still be supported in 1.9, and can be used via `build.gradle`: -``` -repositories { - jcenter() -} - -dependencies { - implementation 'org.pytorch:pytorch_android:1.9.0' - implementation 'org.pytorch:pytorch_android_torchvision:1.9.0' -} -``` - - -## Android Tutorials - -Watch the following [video](https://youtu.be/5Lxuu16_28o) as PyTorch Partner Engineer Brad Heintz walks through steps for setting up the PyTorch Runtime for Android projects: - -[![PyTorch Mobile Runtime for Android](https://i.ytimg.com/vi/O_2KBhkIvnc/maxresdefault.jpg){:height="75%" width="75%"}](https://youtu.be/5Lxuu16_28o "PyTorch Mobile Runtime for Android") - -The corresponding code can be found [here](https://github.com/pytorch/workshops/tree/master/PTMobileWalkthruAndroid). - -Checkout our [Mobile Performance Recipes](https://pytorch.org/tutorials/recipes/mobile_perf.html) which cover how to optimize your model and check if optimizations helped via benchmarking. - -In addition, follow this recipe to learn how to [make Native Android Application that use PyTorch prebuilt libraries](https://pytorch.org/tutorials/recipes/android_native_app_with_custom_op.html). - -## API Docs - -You can find more details about the PyTorch Android API in the [Javadoc](https://pytorch.org/javadoc/). - - - - diff --git a/_mobile/home.md b/_mobile/home.md deleted file mode 100644 index 8638e1058c9b..000000000000 --- a/_mobile/home.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -layout: mobile -title: Home -permalink: /mobile/home/ -background-class: mobile-background -body-class: mobile -order: 1 -published: true -redirect_from: "/mobile/" ---- - -
        -

        Note

        -

        PyTorch Mobile is no longer actively supported. Please check out ExecuTorch, PyTorch’s all-new on-device inference library.

        -
        - -# PyTorch Mobile - -There is a growing need to execute ML models on edge devices to reduce latency, preserve privacy, and enable new interactive use cases. - -The PyTorch Mobile runtime beta release allows you to seamlessly go from training a model to deploying it, while staying entirely within the PyTorch ecosystem. It provides an end-to-end workflow that simplifies the research to production environment for mobile devices. In addition, it paves the way for privacy-preserving features via federated learning techniques. - -PyTorch Mobile is in beta stage right now, and is already in wide scale production use. It will soon be available as a stable release once the APIs are locked down. - - -## Key features -* Available for [iOS]({{site.baseurl}}/mobile/ios), [Android]({{site.baseurl}}/mobile/android) and Linux -* Provides APIs that cover common preprocessing and integration tasks needed for incorporating ML in mobile applications -* Support for tracing and scripting via TorchScript IR -* Support for XNNPACK floating point kernel libraries for Arm CPUs -* Integration of QNNPACK for 8-bit quantized kernels. Includes support for per-channel quantization, dynamic quantization and more -* Provides an [efficient mobile interpreter in Android and iOS](https://pytorch.org/tutorials/recipes/mobile_interpreter.html). Also supports build level optimization and selective compilation depending on the operators needed for user applications (i.e., the final binary size of the app is determined by the actual operators the app needs). -* Streamline model optimization via optimize_for_mobile -* Support for hardware backends like GPU, DSP, and NPU will be available soon in Beta - - -## Prototypes -We have launched the following features in prototype, available in the PyTorch nightly releases, and would love to get your feedback on the [PyTorch forums](https://discuss.pytorch.org/c/mobile/18): - -* GPU support on [iOS via Metal](https://pytorch.org/tutorials/prototype/ios_gpu_workflow.html) -* GPU support on [Android via Vulkan](https://pytorch.org/tutorials/prototype/vulkan_workflow.html) -* DSP and NPU support on Android via [Google NNAPI](https://pytorch.org/tutorials/prototype/nnapi_mobilenetv2.html) - - -## Deployment workflow - -A typical workflow from training to mobile deployment with the optional model optimization steps is outlined in the following figure. -
        - [Figure: typical workflow from training to mobile deployment, including the optional model optimization steps] -
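In code, the workflow in the figure above boils down to a few lines. The sketch below follows the same pattern as the HelloWorld `trace_model.py` script used in the iOS and Android quickstarts (assuming a pretrained TorchVision MobileNetV2):

```python
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile

# 1. Author/train a model, then switch it to inference mode.
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()

# 2. Convert to TorchScript by tracing (scripting also works).
example = torch.rand(1, 3, 224, 224)
traced = torch.jit.trace(model, example)

# 3. Optionally apply mobile-specific optimizations.
optimized = optimize_for_mobile(traced)

# 4. Save for the mobile (lite) interpreter and bundle the file with the app.
optimized._save_for_lite_interpreter("model.ptl")
```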
        - -## Examples to get you started - -* [PyTorch Mobile Runtime for iOS](https://www.youtube.com/watch?v=amTepUIR93k) -* [PyTorch Mobile Runtime for Android](https://www.youtube.com/watch?v=5Lxuu16_28o) -* [PyTorch Mobile Recipes in Tutorials](https://pytorch.org/tutorials/recipes/ptmobile_recipes_summary.html) -* [Image Segmentation DeepLabV3 on iOS](https://pytorch.org/tutorials/beginner/deeplabv3_on_ios.html) -* [Image Segmentation DeepLabV3 on Android](https://pytorch.org/tutorials/beginner/deeplabv3_on_android.html) -* [D2Go Object Detection on iOS](https://github.com/pytorch/ios-demo-app/tree/master/D2Go) -* [D2Go Object Detection on Android](https://github.com/pytorch/android-demo-app/tree/master/D2Go) -* [PyTorchVideo on iOS](https://github.com/pytorch/ios-demo-app/tree/master/TorchVideo) -* [PyTorchVideo on Android](https://github.com/pytorch/android-demo-app/tree/master/TorchVideo) -* [Speech Recognition on iOS](https://github.com/pytorch/ios-demo-app/tree/master/SpeechRecognition) -* [Speech Recognition on Android](https://github.com/pytorch/android-demo-app/tree/master/SpeechRecognition) -* [Question Answering on iOS](https://github.com/pytorch/ios-demo-app/tree/master/QuestionAnswering) -* [Question Answering on Android](https://github.com/pytorch/android-demo-app/tree/master/QuestionAnswering) - -## Demo apps - -Our new demo apps also include examples of image segmentation, object detection, neural machine translation, -question answering, and vision transformers. They are available on both iOS and Android: - -* [iOS demo apps](https://github.com/pytorch/ios-demo-app) -* [Android demo apps](https://github.com/pytorch/android-demo-app) - - - - - - diff --git a/_mobile/ios.md b/_mobile/ios.md deleted file mode 100644 index 85a473df82f2..000000000000 --- a/_mobile/ios.md +++ /dev/null @@ -1,330 +0,0 @@ ---- -layout: mobile -title: iOS -permalink: /mobile/ios/ -background-class: mobile-background -body-class: mobile -order: 2 -published: true ---- - -
        -

        Note

        -

        PyTorch Mobile is no longer actively supported. Please check out ExecuTorch, PyTorch’s all-new on-device inference library. You can also review this page to learn more about how to use ExecuTorch to build an iOS app.

        -
        - -# iOS - -To get started with PyTorch on iOS, we recommend exploring the following [HelloWorld](https://github.com/pytorch/ios-demo-app/tree/master/HelloWorld). - -## Quickstart with a Hello World Example - -HelloWorld is a simple image classification application that demonstrates how to use PyTorch C++ libraries on iOS. The code is written in Swift and uses Objective-C as a bridge. - -### Requirements - -- XCode 11.0 or above -- iOS 12.0 or above - -### Model Preparation - -Let's start with model preparation. If you are familiar with PyTorch, you probably should already know how to train and save your model. In case you don't, we are going to use a pre-trained image classification model - [MobileNet v2](https://pytorch.org/hub/pytorch_vision_mobilenet_v2/), which is already packaged in [TorchVision](https://pytorch.org/vision/stable/index.html). To install it, run the command below. - -> We highly recommend following the [Pytorch Github page](https://github.com/pytorch/pytorch) to set up the Python development environment on your local machine. - -```shell -pip install torchvision -``` - -Once we have TorchVision installed successfully, let's navigate to the HelloWorld folder and run `trace_model.py`. The script contains the code of tracing and saving a [torchscript model](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html) that can be run on mobile devices. - -```shell -python trace_model.py -``` - -If everything works well, `model.pt` should be generated and saved in the `HelloWorld/HelloWorld/model` folder. - -> To find out more details about TorchScript, please visit [tutorials on pytorch.org](https://pytorch.org/tutorials/advanced/cpp_export.html) - -### Install LibTorch-Lite via Cocoapods - -The PyTorch C++ library is available in [Cocoapods](https://cocoapods.org/), to integrate it to our project, simply run - -```ruby -pod install -``` - -Now it's time to open the `HelloWorld.xcworkspace` in XCode, select an iOS simulator and launch it (cmd + R). If everything works well, we should see a wolf picture on the simulator screen along with the prediction result. - - - -### Code Walkthrough - -In this part, we are going to walk through the code step by step. - -#### Image Loading - -Let's begin with image loading. - -```swift -let image = UIImage(named: "image.jpg")! -imageView.image = image -let resizedImage = image.resized(to: CGSize(width: 224, height: 224)) -guard var pixelBuffer = resizedImage.normalized() else { - return -} -``` - -We first load the image from our bundle and resize it to 224x224. Then we call this `normalized()` category method to normalize the pixel buffer. Let's take a closer look at the code below. - -```swift -var normalizedBuffer: [Float32] = [Float32](repeating: 0, count: w * h * 3) -// normalize the pixel buffer -// see https://pytorch.org/hub/pytorch_vision_resnet/ for more detail -for i in 0 ..< w * h { - normalizedBuffer[i] = (Float32(rawBytes[i * 4 + 0]) / 255.0 - 0.485) / 0.229 // R - normalizedBuffer[w * h + i] = (Float32(rawBytes[i * 4 + 1]) / 255.0 - 0.456) / 0.224 // G - normalizedBuffer[w * h * 2 + i] = (Float32(rawBytes[i * 4 + 2]) / 255.0 - 0.406) / 0.225 // B -} -``` - -The code might look weird at first glance, but it’ll make sense once we understand our model. The input data is a 3-channel RGB image of shape (3 x H x W), where H and W are expected to be at least 224. The image has to be loaded in to a range of `[0, 1]` and then normalized using `mean = [0.485, 0.456, 0.406]` and `std = [0.229, 0.224, 0.225]`. 
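For reference, the Swift loop above reproduces the standard torchvision preprocessing. On the Python side the same preprocessing would look roughly like this (a minimal sketch using `torchvision.transforms`; `image.jpg` stands in for the bundled sample image):

```python
from PIL import Image
from torchvision import transforms

# The Swift normalization loop mirrors this standard torchvision preprocessing.
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),  # HWC uint8 in [0, 255] -> CHW float in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
input_tensor = preprocess(Image.open("image.jpg")).unsqueeze(0)  # shape (1, 3, 224, 224)
```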
- -#### TorchScript Module - -Now that we have preprocessed our input data and we have a pre-trained TorchScript model, the next step is to use them to run prediction. To do that, we'll first load our model into the application. - -```swift -private lazy var module: TorchModule = { - if let filePath = Bundle.main.path(forResource: "model", ofType: "pt"), - let module = TorchModule(fileAtPath: filePath) { - return module - } else { - fatalError("Can't find the model file!") - } -}() -``` -Note that the `TorchModule` Class is an Objective-C wrapper of `torch::jit::mobile::Module`. - -```cpp -torch::jit::mobile::Module module = torch::jit::_load_for_mobile(filePath.UTF8String); -``` -Since Swift can not talk to C++ directly, we have to either use an Objective-C class as a bridge, or create a C wrapper for the C++ library. For demo purpose, we're going to wrap everything in this Objective-C class. - -#### Run Inference - -Now it's time to run inference and get the results. - -```swift -guard let outputs = module.predict(image: UnsafeMutableRawPointer(&pixelBuffer)) else { - return -} -``` -Again, the `predict` method is just an Objective-C wrapper. Under the hood, it calls the C++ `forward` function. Let's take a look at how it's implemented. - -```cpp -at::Tensor tensor = torch::from_blob(imageBuffer, {1, 3, 224, 224}, at::kFloat); -c10::InferenceMode guard; -auto outputTensor = _impl.forward({tensor}).toTensor(); -float* floatBuffer = outputTensor.data_ptr(); -``` -The C++ function `torch::from_blob` will create an input tensor from the pixel buffer. Note that the shape of the tensor is `{1,3,224,224}` which represents `{N, C, H, W}` as we discussed in the above section. - -```cpp -c10::InferenceMode guard; -``` -The above line tells PyTorch to do inference only. - -Finally, we can call this `forward` function to get the output tensor and convert it to a `float` buffer. - -```cpp -auto outputTensor = _impl.forward({tensor}).toTensor(); -float* floatBuffer = outputTensor.data_ptr(); -``` - -### Collect Results - -The output tensor is a one-dimensional float array of shape 1x1000, where each value represents the confidence that a label is predicted from the image. The code below sorts the array and retrieves the top three results. - -```swift -let zippedResults = zip(labels.indices, outputs) -let sortedResults = zippedResults.sorted { $0.1.floatValue > $1.1.floatValue }.prefix(3) -``` - -## PyTorch Demo App - -For more complex use cases, we recommend to check out the [PyTorch demo application](https://github.com/pytorch/ios-demo-app). The demo app contains two showcases. A camera app that runs a quantized model to predict the images coming from device’s rear-facing camera in real time. And a text-based app that uses a text classification model to predict the topic from the input string. - -## More PyTorch iOS Demo Apps - -### Image Segmentation - -[Image Segmentation](https://github.com/pytorch/ios-demo-app/tree/master/ImageSegmentation) demonstrates a Python script that converts the PyTorch [DeepLabV3](https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101/) model for mobile apps to use and an iOS app that uses the model to segment images. - -### Object Detection - -[Object Detection](https://github.com/pytorch/ios-demo-app/tree/master/ObjectDetection) demonstrates how to convert the popular [YOLOv5](https://pytorch.org/hub/ultralytics_yolov5/) model and use it on an iOS app that detects objects from pictures in your photos, taken with camera, or with live camera. 
- -### Neural Machine Translation - -[Neural Machine Translation](https://github.com/pytorch/ios-demo-app/tree/master/Seq2SeqNMT) demonstrates how to convert a sequence-to-sequence neural machine translation model trained with the code in the [PyTorch NMT tutorial](https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html) and use the model in an iOS app to do French-English translation. - -### Question Answering - -[Question Answering](https://github.com/pytorch/ios-demo-app/tree/master/QuestionAnswering) demonstrates how to convert a powerful transformer QA model and use the model in an iOS app to answer questions about PyTorch Mobile and more. - -### Vision Transformer - -[Vision Transformer](https://github.com/pytorch/ios-demo-app/tree/master/ViT4MNIST) demonstrates how to use Facebook's latest Vision Transformer [DeiT](https://github.com/facebookresearch/deit) model to do image classification, and how convert another Vision Transformer model and use it in an iOS app to perform handwritten digit recognition. - -### Speech recognition - -[Speech Recognition](https://github.com/pytorch/ios-demo-app/tree/master/SpeechRecognition) demonstrates how to convert Facebook AI's wav2vec 2.0, one of the leading models in speech recognition, to TorchScript and how to use the scripted model in an iOS app to perform speech recognition. - -### Video Classification - -[TorchVideo](https://github.com/pytorch/ios-demo-app/tree/master/TorchVideo) demonstrates how to use a pre-trained video classification model, available at the newly released [PyTorchVideo](https://github.com/facebookresearch/pytorchvideo), on iOS to see video classification results, updated per second while the video plays, on tested videos, videos from the Photos library, or even real-time videos. - - -## PyTorch iOS Tutorial and Recipes - -### [Image Segmentation DeepLabV3 on iOS](https://pytorch.org/tutorials/beginner/deeplabv3_on_ios.html) - -A comprehensive step-by-step tutorial on how to prepare and run the PyTorch DeepLabV3 image segmentation model on iOS. - -### [PyTorch Mobile Performance Recipes](https://pytorch.org/tutorials/recipes/mobile_perf.html) - -List of recipes for performance optimizations for using PyTorch on Mobile. - -### [Fuse Modules recipe](https://pytorch.org/tutorials/recipes/fuse.html) - -Learn how to fuse a list of PyTorch modules into a single module to reduce the model size before quantization. - -### [Quantization for Mobile Recipe](https://pytorch.org/tutorials/recipes/quantization.html) - -Learn how to reduce the model size and make it run faster without losing much on accuracy. - -### [Script and Optimize for Mobile](https://pytorch.org/tutorials/recipes/script_optimized.html) - -Learn how to convert the model to TorchScipt and (optional) optimize it for mobile apps. - -### [Model Preparation for iOS Recipe](https://pytorch.org/tutorials/recipes/model_preparation_ios.html) - -Learn how to add the model in an iOS project and use PyTorch pod for iOS. - -## Build PyTorch iOS Libraries from Source - -To track the latest updates for iOS, you can build the PyTorch iOS libraries from the source code. - -``` -git clone --recursive https://github.com/pytorch/pytorch -cd pytorch -# if you are updating an existing checkout -git submodule sync -git submodule update --init --recursive -``` - -> Make sure you have `cmake` and Python installed correctly on your local machine. 
We recommend following the [Pytorch Github page](https://github.com/pytorch/pytorch) to set up the Python development environment - -### Build LibTorch-Lite for iOS Simulators - -Open terminal and navigate to the PyTorch root directory. Run the following command (if you already build LibTorch-Lite for iOS devices (see below), run `rm -rf build_ios` first): - -``` -BUILD_PYTORCH_MOBILE=1 IOS_PLATFORM=SIMULATOR ./scripts/build_ios.sh -``` -After the build succeeds, all static libraries and header files will be generated under `build_ios/install` - -### Build LibTorch-Lite for arm64 Devices - -Open terminal and navigate to the PyTorch root directory. Run the following command (if you already build LibTorch-Lite for iOS simulators, run `rm -rf build_ios` first): - -``` -BUILD_PYTORCH_MOBILE=1 IOS_ARCH=arm64 ./scripts/build_ios.sh -``` -After the build succeeds, all static libraries and header files will be generated under `build_ios/install` - -### XCode Setup - -Open your project in XCode, go to your project Target's `Build Phases` - `Link Binaries With Libraries`, click the + sign and add all the library files located in `build_ios/install/lib`. Navigate to the project `Build Settings`, set the value **Header Search Paths** to `build_ios/install/include` and **Library Search Paths** to `build_ios/install/lib`. - -In the build settings, search for **other linker flags**. Add a custom linker flag below - -``` --all_load -``` - -To use the custom built libraries the project, replace `#import ` (in `TorchModule.mm`) which is needed when using LibTorch-Lite via Cocoapods with the code below: -``` -#include -#include -#include -``` - -Finally, disable bitcode for your target by selecting the Build Settings, searching for **Enable Bitcode**, and set the value to **No**. - -## Using the Nightly PyTorch iOS Libraries in CocoaPods -If you want to try out the latest features added to PyTorch iOS, you can use the `LibTorch-Lite-Nightly` pod in your `Podfile`, it includes the nightly built libraries: -``` -pod 'LibTorch-Lite-Nightly' -``` -And then run `pod install` to add it to your project. If you wish to update the nightly pod to the newer one, you can run `pod update` to get the latest version. But be aware you may need to build the model used on mobile in the latest PyTorch - using either the latest PyTorch code or a quick nightly install with commands like `pip install --pre torch torchvision -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html` - to avoid possible model version mismatch errors when running the model on mobile. - -## Custom Build - -Starting from 1.4.0, PyTorch supports custom build. You can now build the PyTorch library that only contains the operators needed by your model. To do that, follow the steps below - -1\. Verify your PyTorch version is 1.4.0 or above. You can do that by checking the value of `torch.__version__`. - -2\. To dump the operators in your model, say `MobileNetV2`, run the following lines of Python code: - -```python -import torch, yaml -model = torch.jit.load('MobileNetV2.pt') -ops = torch.jit.export_opnames(model) -with open('MobileNetV2.yaml', 'w') as output: - yaml.dump(ops, output) -``` -In the snippet above, you first need to load the ScriptModule. Then, use `export_opnames` to return a list of operator names of the ScriptModule and its submodules. Lastly, save the result in a yaml file. - -3\. 
To run the iOS build script locally with the prepared yaml list of operators, pass in the yaml file generate from the last step into the environment variable `SELECTED_OP_LIST`. Also in the arguments, specify `BUILD_PYTORCH_MOBILE=1` as well as the platform/architechture type. Take the arm64 build for example, the command should be: - -``` -SELECTED_OP_LIST=MobileNetV2.yaml BUILD_PYTORCH_MOBILE=1 IOS_ARCH=arm64 ./scripts/build_ios.sh -``` -4\. After the build succeeds, you can integrate the result libraries to your project by following the [XCode Setup](#xcode-setup) section above. - -5\. The last step is to add a single line of C++ code before running `forward`. This is because by default JIT will do some optimizations on operators (fusion for example), which might break the consistency with the ops we dumped from the model. - -```cpp -torch::jit::GraphOptimizerEnabledGuard guard(false); -``` - -## Use PyTorch JIT interpreter -PyTorch JIT interpreter is the default interpreter before 1.9 (a version of our PyTorch interpreter that is not as size-efficient). It will still be supported in 1.9, and can be used in CocoaPods: -``` -pod 'LibTorch', '~>1.9.0' -``` - -## iOS Tutorials - -Watch the following [video](https://youtu.be/amTepUIR93k) as PyTorch Partner Engineer Brad Heintz walks through steps for setting up the PyTorch Runtime for iOS projects: - -[![PyTorch Mobile Runtime for iOS](https://i.ytimg.com/vi/JFy3uHyqXn0/maxresdefault.jpg){:height="75%" width="75%"}](https://youtu.be/amTepUIR93k" PyTorch Mobile Runtime for iOS") - -The corresponding code can be found [here](https://github.com/pytorch/workshops/tree/master/PTMobileWalkthruIOS). - -Additionally, checkout our [Mobile Performance Recipes](https://pytorch.org/tutorials/recipes/mobile_perf.html) which cover how to optimize your model and check if optimizations helped via benchmarking. - - -## API Docs - -Currently, the iOS framework uses the Pytorch C++ front-end APIs directly. The C++ document can be found [here](https://pytorch.org/cppdocs/). To learn more about it, we recommend exploring the [C++ front-end tutorials](https://pytorch.org/tutorials/advanced/cpp_frontend.html) on PyTorch webpage. - -## Issues and Contribution - -If you have any questions or want to contribute to PyTorch, please feel free to drop issues or open a pull request to get in touch. 
- - - - diff --git a/_news/news-item-1.md b/_news/news-item-1.md deleted file mode 100644 index a4fffa020a22..000000000000 --- a/_news/news-item-1.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -order: 2 -link: /get-started/pytorch-2.0/#ask-the-engineers-20-live-qa-series -summary: "Ask the Engineers: 2.0 Live Q&A Series" ---- \ No newline at end of file diff --git a/_news/news-item-2.md b/_news/news-item-2.md deleted file mode 100644 index f3dcc0df5a93..000000000000 --- a/_news/news-item-2.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -order: 3 -link: https://fb.me/e/29RoWnqBX -summary: "Watch the PyTorch Conference online" ---- diff --git a/_news/news-item-3.md b/_news/news-item-3.md deleted file mode 100644 index 767382aa5c28..000000000000 --- a/_news/news-item-3.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -order: 1 -link: /blog/pytorch-2.0-release/ -summary: "PyTorch 2.0: Our next generation release that is faster, more Pythonic and Dynamic as ever" ---- diff --git a/_past_issues/2021-03-11-issue-1.md b/_past_issues/2021-03-11-issue-1.md deleted file mode 100644 index a7561fd6ce31..000000000000 --- a/_past_issues/2021-03-11-issue-1.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: "Issue #1" -issue: 1 -date: 2021-03-11 ---- - - -# Issue \#1 - -Welcome to the first issue of the PyTorch Contributors newsletter! Keeping track of everything that’s happening in the PyTorch developer world is a big task; here you will find curated news including RFCs, feature roadmaps, notable PRs, editorials from developers, and more. If you have questions or suggestions for the newsletter, just reply back to this email. - -## PyTorch 1.8.0 - -PyTorch 1.8 was released on March 4th with support for functional transformations using `torch.fx`, stabilized frontend APIs for scientific computing (`torch.fft`, `torch.linalg`, Autograd for complex tensors) and significant improvements to distributed training. Read the full [Release Notes](https://github.com/pytorch/pytorch/releases/tag/v1.8.0){:target="_blank"}. - -## PyTorch Ecosystem Day - -On April 21, we’re hosting a virtual event for our ecosystem and industry communities to showcase their work and discover new opportunities to collaborate. The day will be filled with discussion on new developments, trends, challenges and best practices through posters, breakout sessions and networking. - -## [The PyTorch open source process](http://blog.ezyang.com/2021/01/pytorch-open-source-process/){:target="_blank"} - -[@ezyang](https://github.com/ezyang){:target="_blank"} describes the challenges of maintaining a PyTorch-scale project, and the current open source processes (triaging and CI oncalls, RFC discussions) to help PyTorch operate effectively. - -## Developers forum - -We launched https://dev-discuss.pytorch.org/ a low-traffic high-signal forum for long-form discussions about PyTorch internals. - -## [RFC] [Dataloader v2](https://github.com/pytorch/pytorch/issues/49440) - -[@VitalyFedyunin](https://github.com/VitalyFedyunin) proposes redesigning the DataLoader to support lazy loading, sharding, pipelining data operations (including async) and shuffling & sampling in a more modular way. Join the discussion [here](https://github.com/pytorch/pytorch/issues/49440). 
- -## [RFC] [Improving TorchScript Usability](https://dev-discuss.pytorch.org/t/torchscript-usability/55) - -In a series of 3 blog posts ([1](https://lernapparat.de/scripttorch/), [2](https://lernapparat.de/jit-python-graphops/), [3](https://lernapparat.de/jit-fallback/)) [@t-vi](https://github.com/t-vi) explores ideas to improve the user and developer experience of TorchScript. - -## [RFC] [CSR and DM storage formats for sparse tensors](https://github.com/pytorch/rfcs/pull/13) - -[@pearu](https://github.com/pearu) proposes an [RFC](https://github.com/pytorch/rfcs/pull/13) to make linear algebra operations more performant by - -- implementing the CSR storage format, where a 2D array is defined by shape and 1D tensors for compressed row indices, column indices, and values (PyTorch 1D tensor) -- introducing the Dimension Mapping storage format that generalizes a 2D CSR to multidimensional arrays using a bijective mapping between the storage and wrapper elements. - -## [RFC] [Forward Mode AD](https://github.com/pytorch/rfcs/pull/11) - -[@albanD](https://github.com/albanD) proposes an [RFC](https://github.com/pytorch/rfcs/pull/11) to implement forward mode autodiff using Tensor-based [dual numbers](https://blog.demofox.org/2014/12/30/dual-numbers-automatic-differentiation/), where the real part represents the tensor and the *dual* part stores the forward gradient of the tensor. The core of the feature has landed [(PR)](https://github.com/pytorch/pytorch/pull/49734), with more formulas in WIP. Complete forward mode AD is expected to land by July 2021. diff --git a/_past_issues/2021-05-11-issue-2.md b/_past_issues/2021-05-11-issue-2.md deleted file mode 100644 index 8324c5a923d5..000000000000 --- a/_past_issues/2021-05-11-issue-2.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: "Issue #2" -issue: 2 -date: 2021-05-20 ---- - - -# Issue \#2 - -Welcome to the second edition of the PyTorch newsletter! In this issue, read about how we celebrated the PyTorch community at the first-ever PyTorch Ecosystem Day (PTED), discover a new podcast for PyTorch developers, and learn about important updates to the PyTorch frontend. - -## PyTorch Ecosystem Day - -**Piotr Bialecki (Sr. Software Engineer, NVIDIA)** spoke about his journey of using PyTorch and what he sees in the future for PyTorch. **Miquel FarrĂ© (Sr. Technology Manager, Disney)** spoke about the Creative Genome project that uses the PyTorch ecosystem to annotate all Disney content. **Ritchie Ng (CEO, Hessian Matrix)** spoke about the growth of AI in the Asia Pacific region, and how to get started with PyTorch for production AI use cases. Members of the community showcased how they were using PyTorch via 71 posters and pop-up breakout sessions. See all of the [posters](https://pytorch.org/ecosystem/pted/2021) and listen to the opening [keynote talks](https://www.youtube.com/playlist?list=PL_lsbAsL_o2At9NcX1mR9d12KYUWqxOx9) here! - -## PyTorch Developer Podcast - -**Edward Yang (Research Engineer, Facebook AI)** talks about internal development concepts like binding C++ in Python, the dispatcher, PyTorch’s library structure and more. Check out this new series; each episode is around 15 minutes long. [Listen to it](https://pytorch-dev-podcast.simplecast.com/) wherever you get your podcasts. - -## Forward Mode AD -The core logic for Forward Mode AD (based on “dual tensors”) is now in PyTorch. All the APIs to manipulate such Tensors, codegen and view handling are in `master (1.9.0a0)` already. 
Gradcheck and a first set of formulas will be added in the following month; full support for all PyTorch functions, custom Autograd functions and higher order gradients will happen later this year. Read more about this or share your feedback with [@albanD](https://github.com/albanD) on the corresponding [RFC](https://github.com/pytorch/rfcs/pull/11). - -## Make complex conjugation lazy - -[PR #54987](https://github.com/pytorch/pytorch/pull/54987) makes the conjugate operation on complex tensors return a view that has a special `is_conj()` bit flipped. Aside from saving memory by not creating a full tensor, this grants a potential speedup if the following operation can handle conjugated inputs directly. For such operations (like `gemm`), a flag is passed to the low-level API; for others the conjugate is materialized before passing to the operation. - -## torch.use_deterministic_algorithms is stable - -`torch.use_deterministic_algorithms()` ([docs](https://pytorch.org/docs/master/generated/torch.use_deterministic_algorithms.html)) is stable in `master (1.9.0a0)`. If True, the flag switches non-deterministic operations to their deterministic implementation if available, and throws a `RuntimeError` if not. - -## torch.linalg and torch.special - -`torch.linalg` is now stable; the module maintains fidelity with NumPy’s np.linalg linear algebra functions. -`torch.special` (beta) contains functions in scipy.special. Here’s the [tracking issue](https://github.com/pytorch/pytorch/issues/50345) if you’d like to contribute functions to torch.special. If you want a function not already on the list, let us know on the tracking issue about your use case and why it should be added. - -## Generalizing AMP to work on CPU - -> [@ezyang](https://dev-discuss.pytorch.org/t/generalizing-amp-to-work-on-cpu/201): Intel is interested in bringing automatic mixed precision to CPU in [[RFC] Extend Autocast to CPU/CUDA with BF16 data type · Issue #55374 · pytorch/pytorch ·](https://github.com/pytorch/pytorch/issues/55374) One big question is what the API for autocasting should be for CPU; should we provide a single, generalized API torch.autocast (keep in mind that CPU autocasting would be through bfloat16, while the existing GPU autocasting is via float16), or provide separate APIs for CPU/CUDA? If you have any thoughts or opinions on the subject, please chime in on the issue. - -
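To make the open design question concrete: a single, device-generic API could look roughly like the sketch below. This is illustrative only (the exact name, signature, and dtype handling are precisely what the issue is trying to settle); CPU autocasting would run in bfloat16, while the existing GPU autocasting runs in float16.

```python
import torch

model = torch.nn.Linear(8, 8)
x = torch.randn(2, 8)

# Illustrative sketch of a generalized autocast API that takes a device type
# and a dtype, rather than separate CPU/CUDA entry points.
with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
    y = model(x)
```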
        -
        - -Are you enjoying reading this newsletter? What would you like to know more about? All feedback is welcome and appreciated! To share your suggestions, use this [form](https://forms.gle/K75ELciLJxnabKKH9) or simply reply to this email. diff --git a/_resources/cn-docs.md b/_resources/cn-docs.md deleted file mode 100644 index 4575636110a6..000000000000 --- a/_resources/cn-docs.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: äž­æ–‡æ–‡æĄŁ -summary: Docs and tutorials in Chinese, translated by the community. -class: pytorch-resource -link: https://pytorch.apachecn.org/ -order: 3 -featured-home: true -summary-home: Docs and tutorials in Chinese, translated by the community. - ---- diff --git a/_resources/contribution-guide.md b/_resources/contribution-guide.md deleted file mode 100644 index 6f729b384ef6..000000000000 --- a/_resources/contribution-guide.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Contribution Guide -summary-home: 'Learn how you can contribute to PyTorch code and documentation.' -summary: 'Learn how you can contribute to PyTorch code and documentation.' -class: pytorch-resource -link: https://pytorch.org/docs/master/community/contribution_guide.html -order: 8 -featured-home: true ---- diff --git a/_resources/contributor.md b/_resources/contributor.md deleted file mode 100644 index 842e4209b6d1..000000000000 --- a/_resources/contributor.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Newsletter -summary-home: 'Stay up-to-date with the latest updates.' -summary: 'Stay up-to-date with the latest updates.' -link: /newsletter -class: pytorch-resource -order: 13 -featured-home: true ---- diff --git a/_resources/design-philosophy.md b/_resources/design-philosophy.md deleted file mode 100644 index 724b7aba10b4..000000000000 --- a/_resources/design-philosophy.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Design Philosophy -summary-home: 'PyTorch design principles for contributors and maintainers.' -summary: 'PyTorch design principles for contributors and maintainers.' -class: pytorch-resource -link: https://pytorch.org/docs/master/community/design.html -order: 9 -featured-home: true ---- diff --git a/_resources/dive-into-deep-learning.md b/_resources/dive-into-deep-learning.md deleted file mode 100644 index 4cd4dd383cfe..000000000000 --- a/_resources/dive-into-deep-learning.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Dive into Deep Learning -summary-home: An interactive deep learning book. -summary: An interactive deep learning book. -link: https://d2l.ai/ -order: 11 -featured-home: false -show-pytorch-logo: true ---- diff --git a/_resources/docs.md b/_resources/docs.md deleted file mode 100644 index 0f47871ab552..000000000000 --- a/_resources/docs.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Docs -summary: Access comprehensive developer documentation. -class: pytorch-resource -link: https://pytorch.org/docs/ -order: 1 - ---- diff --git a/_resources/example-projects.md b/_resources/example-projects.md deleted file mode 100644 index ecc8ac378afb..000000000000 --- a/_resources/example-projects.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: Examples -summary: View example projects for vision, text, RL, and more. -class: pytorch-resource -link: https://github.com/pytorch/examples -order: 6 ---- diff --git a/_resources/fast-ai.md b/_resources/fast-ai.md deleted file mode 100644 index 1f9b6eaa8951..000000000000 --- a/_resources/fast-ai.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: fast.ai -summary: Get up and running on PyTorch quickly with free learning courses. 
-class: pytorch-resource -link: https://www.fast.ai/ -order: 9 ---- diff --git a/_resources/github.md b/_resources/github.md deleted file mode 100644 index 3c6b703e939d..000000000000 --- a/_resources/github.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: GitHub -summary: Report bugs, request features, discuss issues, and more. -summary-home: Report bugs, request features, discuss issues, and more. -class: github -link: https://github.com/pytorch/pytorch -order: 3 -featured-home: false ---- diff --git a/_resources/governance.md b/_resources/governance.md deleted file mode 100644 index 1c3da287de49..000000000000 --- a/_resources/governance.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Governance -summary-home: 'Learn about the PyTorch governance hierarchy.' -summary: 'Learn about the PyTorch governance hierarchy.' -class: pytorch-resource -link: https://pytorch.org/docs/master/community/governance.html -order: 10 -featured-home: true ---- diff --git a/_resources/jp-tutorials.md b/_resources/jp-tutorials.md deleted file mode 100644 index 25730ad83c59..000000000000 --- a/_resources/jp-tutorials.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: 日本語 (PyTorch) -summary: Tutorials in Japanese, translated by the community. -class: pytorch-resource -link: https://yutaroogawa.github.io/pytorch_tutorials_jp/ -order: 5 -featured-home: true -summary-home: Tutorials in Japanese, translated by the community. - ---- diff --git a/_resources/korean_tutorials.md b/_resources/korean_tutorials.md deleted file mode 100644 index b0ccc9c5b09b..000000000000 --- a/_resources/korean_tutorials.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: 파이토치 (PyTorch) 튜토리얼 -summary: Tutorials in Korean, translated by the community. -class: pytorch-resource -link: https://tutorials.pytorch.kr/ -order: 4 -featured-home: true -summary-home: Tutorials in Korean, translated by the community. - ---- diff --git a/_resources/maintainers.md b/_resources/maintainers.md deleted file mode 100644 index b5c74998be0f..000000000000 --- a/_resources/maintainers.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Maintainers -summary-home: 'Learn about the PyTorch core and module maintainers.' -summary: 'Learn about the PyTorch core and module maintainers.' -class: pytorch-resource -link: https://pytorch.org/docs/master/community/persons_of_interest.html -order: 7 -featured-home: true ---- diff --git a/_resources/mobile-demo.md b/_resources/mobile-demo.md deleted file mode 100644 index 42f17b0a90a2..000000000000 --- a/_resources/mobile-demo.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Mobile Demo -summary-home: Check out the PyTorch Mobile demo app for iOS and Android. -summary: Check out the PyTorch Mobile demo app for iOS and Android. -class: pytorch-resource -link: https://github.com/pytorch/android-demo-app -order: 10 -featured-home: false ---- diff --git a/_resources/pytorch-discuss.md b/_resources/pytorch-discuss.md deleted file mode 100644 index 1c88d271f169..000000000000 --- a/_resources/pytorch-discuss.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: PyTorch Discuss -summary-home: Browse and join discussions on deep learning with PyTorch. -summary: Browse and join discussions on deep learning with PyTorch. -class: pytorch-resource -link: https://discuss.pytorch.org -order: 1 -featured-home: true ---- diff --git a/_resources/slack.md b/_resources/slack.md deleted file mode 100644 index cb5cac584618..000000000000 --- a/_resources/slack.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Slack -summary-home: 'Discuss advanced topics.'
-summary: 'Discuss advanced topics.' -class: slack -link: https://join.slack.com/t/pytorch/shared_invite/zt-2j2la612p-miUinTTaxXczKOJw48poHA -order: 2 -featured-home: true ---- diff --git a/_resources/training-cert.md b/_resources/training-cert.md deleted file mode 100644 index 0456d1ad5810..000000000000 --- a/_resources/training-cert.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: PyTorch Training & Certification -summary-home: Further your education and career goals. -summary: Further your education and career goals. -class: pytorch-resource -link: https://training.linuxfoundation.org/full-catalog/?_sf_s=PyTorch -order: 14 -featured-home: true ---- diff --git a/_resources/tutorials.md b/_resources/tutorials.md deleted file mode 100644 index 619ec3755b22..000000000000 --- a/_resources/tutorials.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Tutorials -summary: Get in-depth tutorials for beginners and advanced developers. -class: pytorch-resource -link: https://pytorch.org/tutorials -order: 2 - ---- diff --git a/_sass/_variables.scss b/_sass/_variables.scss deleted file mode 100644 index 25c95f460053..000000000000 --- a/_sass/_variables.scss +++ /dev/null @@ -1,123 +0,0 @@ -$custom-font-size: 16px; -$black: #000000; -$white: #ffffff; -$dark_grey: #6c6c6d; -$light_grey: #f3f4f7; -$orange: #ee4c2c; -$medium_grey: #f3f4f7; -$not_quite_black: #262626; -$slate: #262626; -$very_light_grey: #f3f4f7; -$very_dark_grey: #CCCDD1; -$content_text_color: #6c6c6d; -$code_background_color: #f3f4f7; -$dark_blue: #3d5a97; -$quick_start_grey: #6c6c6d; -$command_block_black: #6c6c6d; -$smoky_grey: #CCCDD1; -$medium_smoky_grey: #CCCDD1; -$code_link_color: #4974D1; -$purple: #812CE5; -$light_white: #e2e2e2; -$mid_gray: #797676; - -$desktop_header_height: 90px; -$mobile_header_height: 68px; -$desktop_footer_height: 620px; -$site_horizontal_padding: 30px; - -@import "../node_modules/bootstrap/scss/variables"; - -@mixin desktop { - @media screen and (min-width: 768px) { @content; } -} - -@mixin full-nav-menu-desktop { - @media screen and (min-width: 1200px) { @content; } -} - -@mixin max-width-desktop { - @media screen and (min-width: 1240px) { @content; } -} - -@mixin small-desktop { - @media (min-width: 768px) and (max-width: 1239px) { @content; } -} - -@function rem($px) { - @return ($px / 16px) * 1rem; -} - -@mixin code_font_family { - font-family: IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace; -} - -@mixin clearfix { - &:before, - &:after { - content: ""; - display: table; - } - &:after { - clear: both; - } - & { - *zoom: 1; - } -} - -@mixin default_link_styles { - a:link, - a:visited, - a:hover { - color: $orange; - text-decoration: none; - } - - @include desktop { - a:hover { - text-decoration: underline; - } - - a.social-icon:hover { - text-decoration: none; - } - } -} - -@mixin animated_border_hover_state { - @include desktop { - &:after { - content: ""; - display: block; - width: 0; - height: 1px; - position: absolute; - bottom: 0; - left: 0; - background-color: $orange; - transition: width .250s ease-in-out; - } - - &:hover:after { - width: 100%; - } - &:hover { - color: $not_quite_black; - } - } -} - -@mixin external_link_icon { - &:after { - content: url($baseurl + "/assets/images/external-link-icon.svg"); - margin-left: 15px; - } -} - -@mixin blog_date_and_feature { - font-size: rem(18px); - letter-spacing: 0; - line-height: rem(24px); - margin-bottom: rem(10px); -} diff --git a/_sass/announcement.scss b/_sass/announcement.scss deleted file mode 100644 index 
650d57ba0f08..000000000000 --- a/_sass/announcement.scss +++ /dev/null @@ -1,406 +0,0 @@ -.announcement { - .hero-content { - top: $mobile_header_height + 80px; - height: 250px; - position: relative; - margin-bottom: 120px; - justify-content: center; - - @include desktop { - top: $mobile_header_height + 110px; - height: 350px; - } - - h1 { - font-size: rem(60px); - text-transform: uppercase; - font-weight: lighter; - letter-spacing: 1.08px; - margin-bottom: rem(10px); - line-height: 1.05; - color: $white; - - @include desktop { - font-size: rem(72px); - } - - } - - h1.small { - font-size: 40px; - @include desktop { - font-size: 58px; - } - } - - .lead { - margin-bottom: rem(25px); - padding-top: rem(30px); - color: $white; - width: 100%; - } - } - - - .row { - justify-content: center; - } - - .main-content { - margin-bottom: 5rem; - padding-bottom: 0; - } - - .main-background { - height: 370px; - @include desktop { - height: 450px; - } - - } - - .card-container { - display: grid; - grid-template-columns: repeat(2, 1fr); - gap: 20px; - padding-top: 3rem; - .card { - border: none; - display: block; - a { - color: $black; - } - .card-body { - display: flex; - flex-direction: column; - height: 100%; - justify-content: space-between; - padding: 0; - - img { - width: 100%; - height: 207px; - object-fit: contain; - padding: 20px; - @media screen and (min-width: 1000px) { - padding: 30px; - } - } - } - } - @media screen and (min-width: 1000px) { - grid-template-columns: repeat(3, 1fr); - gap: 36px; - } - } - - .contact-us-section { - background-color: $code_background_color; - padding: 50px 0; - .row { - justify-content: center; - .lead { - padding-top: rem(24px); - } - .hbspt-form { - padding: 30px 0; - - .hs-button { - background-image: url($baseurl + "/assets/images/chevron-right-orange.svg"); - background-size: 6px 13px; - background-position: top 16px right 11px; - background-repeat: no-repeat; - border-radius: 0; - border: none; - background-color: $white; - color: $quick_start_grey; - font-weight: 400; - position: relative; - letter-spacing: 0.25px; - padding: rem(12px) rem(32px) rem(12px) rem(12px); - margin: 10px 0; - - @include animated_border_hover_state; - - @include desktop { - background-position: top 19px right 11px; - } - - } - - fieldset.form-columns-2, fieldset.form-columns-1 { - max-width: 100%; - .hs-form-field { - max-width: 100%; - padding: 10px 0; - width: 100%; - input { - border: none; - width: 100%; - } - textarea { - border: none; - width: 100%; - } - } - } - - li.hs-form-radio { - input[type=radio] { - width: auto !important; - } - - span { - margin-left: 5px; - } - } - - ul { - list-style-type: none; - } - } - } - } - - .light-background-section { - background-color: $white; - .content { - padding: 40px 0; - } - - ul li { - font-size: 1.25rem; - font-weight: 300; - } - } - - .darker-background-section { - background-color: #f3f4f7; - .content { - padding: 40px 0; - } - } - - .grey-background-section { - background-color: #f3f4f7; - padding: 60px 0; - img { - height: 100px; - } - p { - font-size: 14px; - line-height: 170%; - } - } - - .color-background-section { - background-image: url("/assets/images/pytorch_bg_purple.jpg"); - background-size: 100% 100%; - background-repeat: no-repeat; - padding: 60px 0; - h2 { - color: white; - } - } - - .body-side-text { - .lead { - margin-bottom: rem(25px); - padding-top: rem(24px); - } - } - - img { - width: 100%; - } - - h2.upper { - font-size: 25px; - line-height: 130%; - text-align: center; - letter-spacing: 1.75px; - 
text-transform: uppercase; - margin-bottom: 30px; - } - - h3.upper { - font-size: 19px; - text-transform: uppercase; - letter-spacing: 1.75px; - line-height: 130%; - margin: 25px 0; - } - - table.benefits { - background-color: white; - font-size: 14px; - text-align: center; - td.benefit { - border-left: none; - min-width: 300px; - text-align: left; - @include desktop { - min-width: 520px; - } - } - tbody { - td { - border-left: 1px solid #812CE5; - vertical-align: middle; - } - td.benefit { - font-weight: 600; - } - } - thead, tfoot { - background-color: #812CE5; - color: white; - font-size: 16px; - font-weight: 700; - @include desktop { - font-size: 20px; - } - td { - border-left: 1px solid #000; - vertical-align: middle; - border-top: none; - } - a { - text-decoration: underline; - color: white; - } - td.price { - font-size: 14px; - line-height: 1.2; - @include desktop { - font-size: 16px; - } - } - } - img { - width: 15px; - } - } - .modal-header { - border-bottom: none; - padding-bottom: 0; - } - - .consolidated-employees { - tbody td { - font-weight: 600; - } - td.no-border { - border-left: none; - } -} - - .member-boxes { - gap: 20px; - margin: 0; - div.col-sm { - background-color: white; - } - } -} - -.board-member { - margin: 35px 0; - img { - margin-bottom: 15px; - } - a svg { - margin-top: 5px; - height: 25px; - max-width: 30px; - fill: #000; - color: #000; - } - a:hover svg { - fill: $orange; - color: $orange; - } -} - - -.announcement .cloud-credits-table { - font-size: 1.1rem; - margin-top: 40px; - ul { - padding-left: 20px; - li { - margin-top: 10px; - font-size: 1.1rem; - } - } - - .col-md { - border-radius: 5px; - margin-bottom: 40px; - } - - .card { - border-radius: 6px; - } - - .thead { - border-top-left-radius: 5px; - border-top-right-radius: 5px; - color: #fff; - padding: 14px 20px; - text-align: center; - } - .col-md:first-child .thead { - background: conic-gradient(from 53deg at 37% 100%, #828282 0, hsla(0, 0%, 51%, .95) 100%); - } - .col-md:nth-child(2) .thead { - background: conic-gradient(from 53deg at 37% 100%, #ab9344 0, rgba(171, 147, 68, .95) 100%); - } - .col-md:nth-child(3) .thead { - background: conic-gradient(from 53deg at 37% 100%, #293850 0, rgba(41, 56, 80, .95) 100%); - } - - .tbody { - border-bottom: 1px solid #d0d0d0; - border-left: 1px solid #d0d0d0; - border-right: 1px solid #d0d0d0; - height: 100%; - padding: 26px 20px; - } - - .tfoot { - background-color: #000; - border-bottom-left-radius: 5px; - border-bottom-right-radius: 5px; - color: #fff; - padding: 20px; - text-align: center; - } -} - -.announcement .steps-columns { - background-color: transparent; - - .col-md { - margin-bottom: 20px; - padding: 20px; - } - - h3 { - margin-bottom: 20px; - } - - .step { - font-size: 1.5rem; - margin-bottom: 5px; - margin-top: 20px; - } - - ul { - padding-left: 20px; - li { - margin-top: 10px; - } - } - -} \ No newline at end of file diff --git a/_sass/article.scss b/_sass/article.scss deleted file mode 100644 index 8b7aa931d584..000000000000 --- a/_sass/article.scss +++ /dev/null @@ -1,159 +0,0 @@ -article.pytorch-article { - max-width: 920px; - margin: 0 auto; - padding-bottom: 90px; - - h2, - h3, - h4, - h5, - h6 { - margin-top: rem(30px); - margin-bottom: rem(24px); - color: $not_quite_black; - } - - h2 { - font-size: rem(24px); - letter-spacing: 1.33px; - line-height: rem(32px); - margin-top: rem(50px); - text-transform: uppercase; - } - - h3 { - font-size: rem(24px); - letter-spacing: -0.25px; - line-height: rem(30px); - text-transform: none; - } - - h4, 
- h5, - h6 { - font-size: rem(18px); - letter-spacing: -0.19px; - line-height: rem(30px); - } - - p { - margin-bottom: rem(18px); - } - - p, - ul li, - ol li, - dl dt, - dl dd, - blockquote { - font-size: rem(18px); - line-height: rem(30px); - color: $content_text_color; - } - - table { - margin-bottom: rem(40px); - width: 100%; - } - - table thead { - border-bottom: 1px solid #cacaca; - } - - table th, - table tr, - table td { - color: $content_text_color; - font-size: rem(16px); - letter-spacing: -0.17px; - } - - table th { - padding: rem(10px); - color: $not_quite_black; - } - - - table td { - padding: rem(5px); - } - - ul, - ol{ - margin: rem(24px) 0 rem(50px) 0; - - @include desktop { - padding-left: rem(100px); - } - - li { - margin-bottom: rem(10px); - } - } - - dl { - margin-bottom: rem(40px); - } - - dl dt { - margin-bottom: rem(12px); - font-weight: 400; - } - - pre { - margin-bottom: rem(40px); - } - - hr { - margin-top: rem(75px); - margin-bottom: rem(75px); - } - - blockquote { - font-size: rem(12px); - font-style: italic; - padding: 15px 15px 5px 15px; - width: 100%; - background-color: rgba(211, 211, 211, 0.3); - border-left: 2px solid #000000; - } - - h3.no_toc { - margin: 0px; - } - - nav { - float: right; - display: block; - overflow-y: auto; - background-color: white; - margin-left: 20px; - border-left: 1px #717171; - } - - nav li { - font-size: 12px; - line-height: 20px; - padding-top: 0px; - list-style: none; - } - - nav a { - color: #717171; - font-weight: bold; - } - - ul#markdown-toc { - padding-left: 1em; - margin: 0px; - } - - ul#markdown-toc ul { - margin: 0px; - padding-left: 1em; - } - - ul#markdown-toc li { - margin: 0px; - } -} diff --git a/_sass/base_styles.scss b/_sass/base_styles.scss deleted file mode 100644 index 419e7be1d655..000000000000 --- a/_sass/base_styles.scss +++ /dev/null @@ -1,707 +0,0 @@ -* { - font-family: FreightSans, Helvetica Neue, Helvetica, Arial, sans-serif; - font-weight: 400; /* normal - https://developer.mozilla.org/en-US/docs/Web/CSS/font-weight#Common_weight_name_mapping */ -} - -h1, h2, h3, h4, h5, h6 { - font-family: FreightSans; -} - -p { - margin-bottom: 1.25rem; -} - -a, em, i, b, strong, u, span { - font-size: inherit; -} - -a:link, -a:visited, -a:hover { - text-decoration: none; - color: $orange; -} - -p { - @include default_link_styles; -} - -.btn, -a.btn { - border-radius: 0; - border: none; - background-color: $light_grey; - color: $quick_start_grey; - font-weight: 400; - position: relative; - letter-spacing: 0.25px; - - &.btn-lg { - font-size: 1.125rem; - padding-top: rem(8px); - } - - &.btn-white { - background-color: $white; - } - - &.btn-orange { - background-color: $orange; - } - - &.btn-demo { - color: $white; - } - - @include animated_border_hover_state; -} - -.navbar { - padding-left: 0; - padding-right: 0; -} - -html { - position: relative; - min-height: 100%; - font-size: 12px; - - @include desktop { - font-size: 16px; - } -} - -body { - @include desktop { - margin: 0 0 $desktop_footer_height; - } - - &.no-scroll { - height: 100%; - overflow: hidden; - } -} - -a, .btn { - &.with-right-arrow { - padding-right: rem(32px); - position: relative; - background-image: url($baseurl + "/assets/images/chevron-right-orange.svg"); - background-size: 6px 13px; - background-position: top 10px right 11px; - background-repeat: no-repeat; - @include desktop { - background-size: 8px 14px; - background-position: top 15px right 12px; - padding-right: rem(32px); - } - } - &.with-left-arrow { - padding-left: rem(32px); - position: 
relative; - background-image: url($baseurl + "/assets/images/chevron-left-grey.svg"); - background-size: 6px 13px; - background-position: top 10px left 11px; - background-repeat: no-repeat; - @include desktop { - background-size: 8px 14px; - background-position: top 16px left 12px; - padding-left: rem(32px); - } - } -} - -.main-background { - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 350px; - background-size: 100% 100%; - background-repeat: no-repeat; - background-image: url($baseurl + "/assets/images/pytorch_bg_purple.jpg"); - - @include desktop { - height: 640px; - } - - &.home-page-background { - z-index: -1; - height: 350px; - - @include desktop { - height: 570px; - } - } - &.hub-background { - height: 380px; - @include desktop { - height: 495px; - } - } - &.ecosystem-background { - @include desktop { - height: 472px; - } - } - &.events-background { - @include desktop { - height: 472px; - } - } - &.ecosystem-join-background { - @include desktop { - height: 435px; - } - } - &.ecosystem-detail-background { - } - &.resources-background { - height: 380px; - @include desktop { - height: 472px; - } - } - &.get-started-background { - height: 275px; - @include desktop { - height: 380px; - } - } - &.comm-stories-background { - height: 275px; - @include desktop { - height: 380px; - } - } - &.style-guide { - } - &.announcement-background { - } - &.features-background { - height: 335px; - @include desktop { - height: 300px; - } - } - &.blog-background { - } - &.mobile-background { - } - &.deep-learning-background { - } -} - -.bg-light-grey { - background-color: $light_grey; -} - -.text-dark-grey { - color: $dark_grey; -} - -.sidebar-links .top-section { - color: $black; -} - -.sidebar-links ul { - list-style-type: none; - padding-left: 0; - li { - color: $dark_grey; - margin-left: 20px; - a { - color: inherit; - } - } -} - -.sidebar-links .with-sub-sections { - &.top-section:before { - content: "+ "; - font-family: "Courier New", Courier, monospace; - width: 50px; - } - - &.top-section.open:before { - content: "- "; - font-family: "Courier New", Courier, monospace; - width: 50px; - } -} - -.bg-very-light-grey { - background-color: $very_light_grey; -} - -.email-subscribe-form { - input.email { - color: $orange; - border: none; - border-bottom: 1px solid #939393; - width: 100%; - background-color: transparent; - outline: none; - font-size: 1.125rem; - letter-spacing: 0.25px; - line-height: 2.25rem; - } - - ::-webkit-input-placeholder { /* Chrome/Opera/Safari */ - color: $orange; - } - ::-moz-placeholder { /* Firefox 19+ */ - color: $orange; - } - :-ms-input-placeholder { /* IE 10+ */ - color: $orange; - } - :-moz-placeholder { /* Firefox 18- */ - color: $orange; - } - - input[type="submit"] { - position: absolute; - right: 0; - top: 10px; - height: 15px; - width: 15px; - background-image: url($baseurl + "/assets/images/arrow-right-with-tail.svg"); - background-color: transparent; - background-repeat: no-repeat; - background-size: 15px 15px; - background-position: center center; - -webkit-appearance: none; - -moz-appearance: none; - appearance: none; - border: 0; - } -} - -.email-subscribe-form-fields-wrapper { - position: relative; -} - -.bg-slate { - background-color: $slate; -} - -.tweets-wrapper { - width: 100%; - - p { - font-size: rem(16px); - line-height: rem(24px); - letter-spacing: 0.22px; - } - - ol { - padding-left: 0; - } - - a { - color: $orange; - } - - img, - .timeline-Tweet-actions, - .timeline-Tweet-media, - .MediaCard { - display: none !important; - } -} - 
-.tweet { - margin-bottom: 2.2rem; - word-wrap: break-word; - - a { - color: $orange; - display: inline; - span { - color: inherit; - } - } - - p, span { - font-size: 1rem; - line-height: 1.5rem; - letter-spacing: 0.22px; - color: #A0A0A1; - } - - p { - @include max-width-desktop { - padding-right: 40px; - } - } - - span.retweeted, - span.in-reply-to { - font-size: rem(13px); - } - - p.tweet-header { - margin-bottom: rem(5px); - line-height: rem(12px); - } - - .tweet-bird { - &:before { - content: ""; - position: relative; - left: 0; - background-image: url($baseurl + "/assets/images/logo-twitter-grey.svg"); - background-size: 20px 16px; - display: inline-block; - width: 20px; - height: 16px; - - @include desktop { - margin-bottom: rem(10px); - } - } - } -} - -.anchorjs-link { - color: $quick_start_grey !important; - @include desktop { - &:hover { - color: inherit; - text-decoration: none !important; - } - } -} - -.article-page-module { - background-color: $light_grey; - padding-top: rem(30px); - padding-bottom: rem(30px); - - @include desktop { - padding-top: rem(60px); - padding-bottom: rem(60px); - } - - @include max-width-desktop { - .col-md-3 { - padding-left: 20px; - padding-right: 20px; - } - } - - .module-link-col { - .btn { - padding-left: 0; - } - @include desktop { - text-align: right; - .btn { - padding-left: inherit; - } - } - } - - .module-content-wrapper { - margin-top: rem(20px); - margin-bottom: rem(20px); - @include desktop { - margin-top: 0; - margin-bottom: 0; - } - } - - img { - margin-bottom: rem(30px); - width: 100%; - } - - h3 { - font-size: rem(24px); - letter-spacing: 1.33px; - line-height: rem(32px); - text-transform: uppercase; - margin-bottom: rem(20px); - @include desktop { - margin-bottom: rem(60px); - } - } - - h5, p { - font-size: rem(16px); - line-height: rem(24px); - } - - h5 { - color: $not_quite_black; - } - - p { - color: $very_dark_grey; - letter-spacing: 0.25px; - } -} - -.article-page-module .module-header { - position: relative; -} - -.article-page-module .module-button { - padding-left: 0; - @include desktop { - position: absolute; - right: 15px; - top: 0; - padding-top: 0; - padding-bottom: rem(2px); - background-position: center right; - padding-right: 16px; - } -} - -article.pytorch-article .note-card { - border-radius: 0; - border: none; - background-color: $orange; - color: white; - padding: 30px; - margin-bottom: 50px; - - h4 { - font-size: 1.5rem; - letter-spacing: 1.33px; - line-height: 2rem; - text-transform: uppercase; - color: white; - margin-top: 0; - margin-bottom: rem(18px); - } - - p { - font-size: rem(18px); - line-height: 1.5em; - margin-bottom: 0; - color: white; - a { - color: white; - font-weight: 700; - } - } - -} - -.ecosystem-card, -.resource-card, -.hub-card { - border-radius: 0; - border: none; - height: 110px; - margin-bottom: rem(20px); - margin-bottom: rem(30px); - overflow: scroll; - - @include max-width-desktop { - height: 150px; - overflow: inherit; - } - - @include small-desktop { - height: 170px; - overflow: inherit; - } - - p.card-summary { - font-size: rem(18px); - line-height: rem(24px); - margin-bottom: 0; - color: $dark_grey; - } - - h4 { - color: $slate; - margin-bottom: rem(18px); - overflow: hidden; - white-space: nowrap; - text-overflow: ellipsis; - } - - a { - height: 100%; - - @include desktop { - min-height: 190px; - } - - @include small-desktop { - min-height: 234px; - } - } - - @include animated_border_hover_state; - &:hover { - p.card-summary { - color: $not_quite_black; - } - } -} - -.ecosystem-card 
.card-body { - background-position: top rem(20px) right rem(20px); - background-repeat: no-repeat; - padding: rem(25px) rem(30px); - - &.reasoning { - background-image: url($baseurl + "/assets/images/logo-elf.svg"); - background-size: 29px 25px; - } - - &.tool { - background-image: url($baseurl + "/assets/images/logo-wav2letter.svg"); - background-size: 29px 25px; - } - - &.language { - background-image: url($baseurl + "/assets/images/logo-parlai.svg"); - background-size: 29px 25px; - } - - &.vision { - background-image: url($baseurl + "/assets/images/logo-detectron.svg"); - background-size: 29px 25px; - } - -} - -.resource-card { - border: 1px solid #d6d7d8; - background-color: transparent; - margin-bottom: rem(20px); - - @include desktop { - margin-bottom: 0; - } - - @include small-desktop { - height: 225px; - } - - .pytorch-image { - position: relative; - height: rem(20px); - width: rem(20px); - top: rem(50px); - } - - a { - letter-spacing: 0.25px; - color: $not_quite_black; - } - - .card-body { - display: block; - padding: 0 15px 0 0; - position: relative; - top: 20px; - margin-left: 60px; - - @include small-desktop { - top: 18px; - } - - @include max-width-desktop { - top: 30px; - margin-left: 80px; - padding-right: 30px; - } - } - - &.slack, - &.github, - &.pytorch-resource { - &:before { - content: ""; - background-size: 32px 32px; - background-repeat: no-repeat; - display: block; - position: absolute; - height: 32px; - width: 32px; - top: 15px; - left: 15px; - - @include max-width-desktop { - left: 30px; - top: 30px; - } - } - } - - &.slack { - &:before { - background-image: url($baseurl + "/assets/images/logo-slack.svg"); - } - } - - &.github { - &:before { - background-image: url($baseurl + "/assets/images/logo-github.svg"); - } - } - - &.pytorch-resource { - &:before { - background-image: url($baseurl + "/assets/images/logo-icon.svg"); - } - } - - .pytorch-discuss { - .discuss { - color: $orange; - font-weight: 400; - } - } - - @include animated_border_hover_state; -} - -.article-page-module.similar-projects { - .ecosystem-card p.card-summary { - font-size: rem(16px); - height: 36px; - @include desktop { - height: 50px; - } - } -} - -#twitter-widget iframe { - display: none !important; -} - -body.general .main-content-wrapper { - margin-top: 80px; - - @include desktop { - margin-top: 100px; - } -} - -.domain-card { - background-color: $light_grey; - padding: 40px 20px; - margin: 20px 0; - h4 { - color: $black; - } - p { - color: $dark_grey; - margin-bottom: 0; - } - &:hover { - h4 { - color: $orange; - } - } -} - diff --git a/_sass/blog.scss b/_sass/blog.scss deleted file mode 100644 index b23dd57108b1..000000000000 --- a/_sass/blog.scss +++ /dev/null @@ -1,364 +0,0 @@ -.blog { - .navbar-nav .nav-link { - color: $black; - } - - .main-content { - padding-bottom: 1.5rem; - @include desktop { - padding-top: 1.70rem; - padding-bottom: 3.5rem; - } - } - - .main-background { - height: 290px; - @include desktop { - height: 485px; - } - } - - .blog-detail-background { - height: 300px; - @include desktop { - height: 312px; - } - } - - .main-content-menu { - .navbar-nav .nav-link { - text-transform: capitalize; - - &.selected { - color: $orange !important; - text-decoration: underline; - text-decoration-color: $orange; - opacity: 0.75 !important; - } - } - - .nav-item:last-of-type { - @include desktop { - position: absolute; - right: 0; - a { - margin-right: 0; - } - } - } - } - - .zoom-in { - cursor: zoom-in; - } - - .zoomed { - cursor: zoom-out; - img { - margin: auto !important; - 
position: absolute; - top: 0; - left:0; - right:0; - bottom: 0; - max-width: 98%; - } - } - - .nav-logo { - background-image: url($baseurl + "/assets/images/logo-dark.svg"); - } - - .main-content-wrapper { - margin-top: 275px; - .row.blog-index { - margin-top: 30px; - p { - color: $dark_grey; - } - } - .row.blog-vertical { - display: block; - max-width: 100%; - margin: auto; - .col-md-4 { - display: initial; - } - .btn { - float: left; - } - } - .vertical-blog-container { - border-bottom: 1px solid #E2E2E2; - padding-bottom: 3rem; - &:last-of-type { - margin-bottom: 2rem; - } - } - @include desktop { - margin-top: 380px + $desktop_header_height; - .row.blog-index - [class*="col-"]:not(:first-child):not(:last-child):not(:nth-child(3n)) { - padding-right: rem(35px); - padding-left: rem(35px); - } - - .row.blog-index [class*="col-"]:nth-child(3n) { - padding-left: rem(35px); - } - - .row.blog-index [class*="col-"]:nth-child(3n + 1) { - padding-right: rem(35px); - } - - .col-md-4 { - margin-bottom: rem(23px); - } - } - - h4 { - a { - font-family: FreightSans; - font-size: rem(24px); - color: $black; - letter-spacing: 0; - line-height: rem(32px); - font-weight: 400; - } - } - - .author { - color: $orange; - font-size: rem(20px); - letter-spacing: 0.25px; - line-height: rem(30px); - margin-bottom: rem(30px); - } - - .author-icon { - position: relative; - top: rem(26px); - height: rem(17px); - width: rem(19px); - } - } - - .blog-detail-content { - padding-bottom: 2.8rem; - } - - .blog-detail-wrapper { - @include desktop { - margin-top: 234px + $desktop_header_height; - } - } - - .jumbotron { - top: rem(105px); - @include desktop { - height: rem(405px); - } - - .container { - @include desktop { - padding-bottom: rem(45px); - } - } - - .blog-index-title { - overflow: hidden; - margin-top: 1.5rem; - white-space: nowrap; - text-overflow: ellipsis; - color: white; - @include desktop { - overflow: unset; - white-space: unset; - text-overflow: unset; - } - } - - h1 { - letter-spacing: -1.65px; - font-size: rem(52px); - line-height: rem(56px); - text-transform: none; - color: $white; - a { - color: $white; - word-wrap: break-word; - } - } - - h2 { - color: $white; - } - - .blog-title { - display: inline-flex; - &:hover { - color: $white; - } - } - - .blog-detail-container { - padding-top: 4rem; - @include desktop { - padding-top: rem(174px); - } - } - - p { - font-size: rem(20px); - letter-spacing: 0; - line-height: rem(30px); - color: $white; - } - - .btn { - margin-top: rem(12px); - padding-top: rem(9px); - } - - .blog-page-container { - p.blog-date { - padding-top: rem(10px); - } - .btn { - margin-bottom: rem(10px); - } - } - } - - .blog-detail-jumbotron { - top: 45px; - @include desktop { - height: 107px; - top: 75px; - } - } - - p.blog-date { - @include blog_date_and_feature; - color: $dark_grey; - } - - p.featured-post { - @include blog_date_and_feature; - color: $white; - } - - p.featured-blog-preview { - margin-bottom: rem(12px); - } - - #blogPostFilter { - .nav-link { - opacity: 0.53; - font-size: rem(20px); - color: $black; - letter-spacing: 0; - line-height: rem(34px); - } - } - - .page-link { - font-size: rem(20px); - letter-spacing: 0; - line-height: rem(34px); - color: $orange; - width: rem(120px); - text-align: center; - } - - .blog-modal { - max-width: 75%; - top: 5rem; - &:hover { - cursor: zoom-out; - } - @media (max-width: 575px) { - max-width: 100%; - top: 10rem; - } - } - - .blog-image { - cursor: zoom-in; - } - - @media (max-width: 1067px) { - .jumbotron { - h1 { - margin-right: 0; 
- margin-top: 1.5rem; - a { - font-size: rem(45px); - line-height: rem(40px); - } - } - } - - .main-content-wrapper { - .col-md-4 { - margin-bottom: rem(75px); - } - } - - .similar-posts { - margin-bottom: rem(50px); - } - } - - @media (max-width: 1050px) { - .main-content-wrapper { - .author-icon { - left: rem(-30px); - } - } - } - - table { - tr { - th { - font-weight: 600; - } - } - } - - .pytorch-article { - .enterprise-azure-logo-container { - padding-left: 0; - img { - margin-bottom: 0; - } - } - } -} - -.blog .pytorch-article img { - margin-bottom: rem(18px); -} - -twitterwidget { - margin: 0 auto; - margin-top: rem(18px) !important; - margin-bottom: rem(18px) !important; -} - -.pytorch-article .outlined-code-block { - border: 1px solid black; - padding: 1rem; - margin-bottom: 1rem; - pre { - margin: 0; - padding: 0; - background-color: white; - } -} - -.pytorch-article .reference-list { - li { - overflow-wrap: anywhere; - } -} diff --git a/_sass/bootstrap-overrides.scss b/_sass/bootstrap-overrides.scss deleted file mode 100644 index 4593f29e05cc..000000000000 --- a/_sass/bootstrap-overrides.scss +++ /dev/null @@ -1,15 +0,0 @@ -.container { - padding-left: $site_horizontal_padding; - padding-right: $site_horizontal_padding; - max-width: 1240px; - - @mixin max-width-desktop { - padding-left: 0; - padding-right: 0; - } -} - -.container-fluid { - padding-left: 0; - padding-right: 0; -} diff --git a/_sass/code.scss b/_sass/code.scss deleted file mode 100644 index 21a7e83a274d..000000000000 --- a/_sass/code.scss +++ /dev/null @@ -1,50 +0,0 @@ -code, kbd, pre, samp, code b { - @include code_font_family; - span { - @include code_font_family; - } -} - -pre { - padding: rem(18px); - background-color: $code_background_color; - - code { - font-size: rem(14px); - } - - &.highlight { - background-color: $light_grey; - line-height: rem(21px); - } -} - -code.highlighter-rouge { - color: $content_text_color; - background-color: $light_grey; - padding: 2px 6px; -} - -a:link, -a:visited, -a:hover { - code.highlighter-rouge { - color: $code_link_color; - } - - &.has-code { - color: $code_link_color; - } -} - -p, -h1, -h2, -h3, -h4, -h5, -h6 { - code { - font-size: 78.5%; - } -} diff --git a/_sass/community-stories.scss b/_sass/community-stories.scss deleted file mode 100644 index 7d2c0d0c9067..000000000000 --- a/_sass/community-stories.scss +++ /dev/null @@ -1,187 +0,0 @@ -.comm-stories { - .community-stories-wrapper { - background-color: white; - } - .community-stories { - padding-top: 0; - .production-info-container, - .research-info-container { - display: flex; - flex-flow: column; - } - .sticky-top { - top: 15%; - } - } - .production-container, - .research-container { - display: flex; - padding-left: 0; - @media (max-width: 767px) { - flex-flow: wrap; - } - } - .production-section, .research-section { - max-width: 920px; - margin: 0 auto 0 auto; - padding: 0 30px 43px 30px; - width: 90%; - .production-item, .research-item { - padding-bottom: 2rem; - padding-top: 2rem; - border-bottom: 1px solid #d6d7d8; - h2 { - padding-bottom: 1rem; - } - } - } - .production-side-nav-container, - .research-side-nav-container { - #research-sidebar-list, - #production-sidebar-list{ - padding-left: 0; - .active { - color: $orange; - } - ul { - padding-left: 3rem; - list-style: none; - li { - line-height: 36px; - a { - color: #8c8c8c; - } - } - } - } - } - - .production-section, .research-section { - p { - font-size: 18px; - margin-top: 2rem; - } - @include small-desktop { - width: 100%; - padding-left: 5px; - 
padding-right: 5px; - } - @media (max-width: 767px) { - width: 100%; - padding-left: 5px; - padding-right: 5px; - } - } - - .main-content-wrapper { - margin-top: 275px; - @include desktop { - margin-top: 380px; - } - } - - .jumbotron { - color: $white; - height: 190px; - @include desktop { - height: 260px; - } - } -} -.ecosystem .community-stories.main-content { - padding-top: 0; -} - -.community-stories-container-fluid { - height: 5rem; - width: 100%; - padding-bottom: 7rem; - @media screen and (max-width: 767px) { - margin-top: 2rem; - } - @include full-nav-menu-desktop { - margin-left: 0; - } -} - - - -.comm-stories .community-stories.main-content .navbar { - padding-left: 0; - padding-bottom: 0; - padding-top: 0; - .nav-item { - cursor: pointer; - &:last-of-type { - position: relative; - } - } - @media (min-width: 992px) { - .nav-item { - padding: 2rem; - cursor: pointer; - } - - .nav-link { - position: relative; - top: 10%; - transform: translateY(-50%); - } - } - - .nav-select { - background-color: $white; - .nav-link { - color: $orange; - font-weight: 500; - } - } - - .nav-link { - font-size: rem(18px); - color: #8c8c8c; - @include desktop { - margin-left: rem(30px); - } - &:hover { - color: $orange; - } - } - - .community-stories-nav-link { - padding-left: rem(20px); - padding-right: rem(20px); - - @include desktop { - padding-left: rem(30px); - padding-right: rem(30px); - } - } - - .community-stories-nav { - flex-direction: row; - } - - .nav-item { - padding-top: rem(15px); - padding-bottom: rem(15px); - @include desktop { - padding-bottom: 0; - padding-top: 2rem; - } - @include small-desktop { - padding-bottom: 0; - padding-top: 2rem; - } - @media (max-width: 990px) { - padding-bottom: rem(10px); - padding-top: 1rem; - } - } - - .navbar-toggler { - margin-left: rem(40px); - } -} - - diff --git a/_sass/compact.scss b/_sass/compact.scss deleted file mode 100644 index 12578e5cbae5..000000000000 --- a/_sass/compact.scss +++ /dev/null @@ -1,82 +0,0 @@ -.compact-cards { - width: 100%; - a { - color: #6C6C6D; - &:hover { - color: $orange; - } - } -} - -.compact-hub-card-wrapper { - padding: 0; -} - -.compact-card-container { - display: flex; - align-items: center; -} - -.compact-card-body { - padding-top: 8px; - &:hover { - border-bottom: 1px solid $orange; - color: $orange; - .compact-item-title { - color: $orange - } - } - .compact-hub-card-title-container { - width: 75%; - display: flex; - } -} - -.compact-model-card { - height: auto; - border-bottom: 1px solid #E2E2E2; -} - -.compact-item-title { - padding-left: 0; - color: #000; -} - -.compact-card-summary { - white-space: nowrap; - overflow: hidden; - text-overflow: ellipsis; - top: 5px; -} - -.compact-hub-divider { - padding: 0; - width: 100%; -} - -.hub-select-container { - position: absolute; - right: 0; - height: 2rem; -} - -.compact-hub-index-cards { - padding-bottom: 2rem; -} - -.full-hub-icon { - &:hover { - cursor: pointer; - height: 3rem; - } -} - -.compact-hub-icon { - margin-left: 0.5rem; - margin-right: rem(50px); - &:hover { - cursor: pointer; - } -} - - diff --git a/_sass/contributors.scss b/_sass/contributors.scss deleted file mode 100644 index cc3507edb3d4..000000000000 --- a/_sass/contributors.scss +++ /dev/null @@ -1,339 +0,0 @@ -.ecosystem .contributor-jumbotron { - @include desktop { - height: 262px; - } - width: 90%; - - .container { - max-width: 920px; - } - - h1 { - padding-top: 0; - span { - font-weight: 300; - color: $purple; - } - } -} - -.ecosystem .contributor-jumbotron .contributor-jumbo-text { - h1 { 
- color: white; - } - h2 { - color: white; - padding-top: 0; - } -} - -.hidden { - display: none; -} - -.contributor-container-fluid { - height: 4rem; - width: 100%; - @media screen and (max-width: 767px) { - margin-top: 2rem; - } - @include full-nav-menu-desktop { - margin-left: 0; - } -} - -.ecosystem .contributor.main-content { - padding-top: 0; -} - -.ecosystem .contributor.main-content .navbar { - padding-left: 0; - padding-bottom: 0; - padding-top: 0; - .nav-item { - cursor: pointer; - &:last-of-type { - position: relative; - } - } - @media (min-width: 992px) { - .nav-item { - padding: 2rem; - cursor: pointer; - } - - .nav-link { - position: relative; - top: 10%; - transform: translateY(-50%); - } - } - - .nav-select { - background-color: $white; - .nav-link { - color: $orange; - font-weight: 500; - } - } - - .nav-link { - font-size: rem(18px); - color: #8c8c8c; - @include desktop { - margin-left: rem(30px); - } - &:hover { - color: $orange; - } - } - - .contributor-nav-link { - padding-left: rem(20px); - padding-right: rem(20px); - - @include desktop { - padding-left: rem(30px); - padding-right: rem(30px); - } - } - - .contributor-nav { - flex-direction: row; - } - - .nav-item { - padding-top: rem(15px); - padding-bottom: rem(15px); - @include desktop { - padding-bottom: 0; - padding-top: 2rem; - } - @include small-desktop { - padding-bottom: 0; - padding-top: 2rem; - } - @media (max-width: 990px) { - padding-bottom: rem(10px); - padding-top: 1rem; - } - } - - .navbar-toggler { - margin-left: rem(40px); - } -} - -.past-issue-container { - display: flex; - @media (max-width: 767px) { - display: block; - } -} - -.past-issue-container .get-started-cloud-sidebar{ - .sticky-top { - position: sticky; - top: 15%; - @media (max-width: 767px) { - position: relative; - top: 0; - margin-left: 0; - } - } - - .pytorch-article { - li { - list-style: initial; - } - } - - li { - list-style-type: none; - line-height: 36px; - color: #8c8c8c; - } - span { - white-space: nowrap; - } -} - -#past-issues { - max-width: 920px; - margin: auto; - margin-top: 0; - margin-bottom: 0; -} - -.contributor-container { - max-width: 920px; - left: 0; - right: 0; - margin-left: auto; - margin-right: auto; - padding-left: 30px; - padding-right: 30px; - width: 90%; -} - -.past-issue-container.container { - padding-left: 5px; - padding-top: 45px; -} - -.nav-background { - width: 100%; - background-color: $very_light_grey; -} - -#get-started-contributor-sidebar-list { - padding-left: 0; - .active { - color: $orange; - } - li { - a { - color: #8c8c8c; - } - } -} - -.two-column-row { - max-width: 920px; - margin: 0 auto 0 auto; - padding: 0 30px 43px 30px; - width: 90%; - - @include desktop { - display: flex; - } - - h2 { - text-transform: uppercase; - font-weight: 100; - margin-bottom: 30px; - } - - p { - margin-bottom: 40px; - } - - .content-left { - flex: 60%; - padding-top: 76px; - - @include desktop { - margin-right: 62px; - } - - h2 { - color: $orange; - } - - .contributor-consent-check { - max-width: 400px; - } - - .email-consent { - color: $mid_gray; - font-size: 14px; - } - - .please-accept-terms { - display: none; - color: $orange; - font-size: 14px; - } - } - - .content-right { - flex: 40%; - padding-top: 76px; - - h2 { - color: $purple; - } - } - - .contributor-form { - margin: -8px 0 47px 0; - - .form-success, - .form-fail { - color: $orange; - display: none; - flex: none; - margin: 8px 0 12px 0; - } - - form { - width: 100%; - - .contributor-form-ui { - display: flex; - max-width: 390px; - flex-wrap: wrap; - - 
input[type="text"] { - border: 1px solid darken($color: #f3f3f3, $amount: 5); - border-radius: 4px; - flex: 1 70%; - padding: 5px 8px 5px 8px; - margin-right: 10px; - - &::placeholder { - color: darken($color: #f3f3f3, $amount: 20); - } - - &:focus { - border: 1px solid $orange; - } - } - - input[type="submit"] { - background: darken($color: #f3f3f3, $amount: 5); - border: none; - border-radius: 4px; - color: #6d6d6d; - - &:hover { - background: darken($color: #f3f3f3, $amount: 20); - color: darken($color: #6d6d6d, $amount: 20); - } - } - } - } - - input[type="checkbox"] { - margin: 1px 6px 0 0; - } - - .contributor-consent-check { - color: $mid_gray; - margin-top: 1rem; - } - } - - .contributors-button { - background-image: url($baseurl + "/assets/images/chevron-right-orange.svg"); - background-color: $white; - background-size: 6px 13px; - background-position: center right 10px; - background-repeat: no-repeat; - border: 2px solid $light_grey; - color: $dark_grey; - cursor: pointer; - font-size: 1.125rem; - outline: none; - letter-spacing: -0.25px; - line-height: rem(28px); - margin-bottom: 0.125rem; - padding: rem(10px) rem(30px) rem(10px) rem(20px); - - a { - color: $dark_grey; - } - - @include animated_border_hover_state; - } -} diff --git a/_sass/cookie-banner.scss b/_sass/cookie-banner.scss deleted file mode 100644 index 86885c83dad1..000000000000 --- a/_sass/cookie-banner.scss +++ /dev/null @@ -1,53 +0,0 @@ -.cookie-banner-wrapper { - display: none; - - &.is-visible { - display: block; - position: fixed; - bottom: 0; - background-color: $light_grey; - min-height: 100px; - width: 100%; - z-index: 401; - border-top: 3px solid #ededee; - } - - .gdpr-notice { - color: $dark_grey; - margin-top: rem(25px); - text-align: left; - max-width: 1440px; - @include desktop { - width: 77%; - } - @include small-desktop { - width: inherit; - } - - .cookie-policy-link { - color: #343434; - } - } - - .close-button { - appearance: none; - background: transparent; - border: 1px solid $light_grey; - height: rem(21px); - position: absolute; - bottom: 42px; - right: 0; - top: 0; - cursor: pointer; - outline: none; - @include desktop { - right: 20%; - top: inherit; - } - - @include small-desktop { - right: 0; - top: 0; - } - } -} diff --git a/_sass/deep-learning.scss b/_sass/deep-learning.scss deleted file mode 100644 index 4399b4b775d0..000000000000 --- a/_sass/deep-learning.scss +++ /dev/null @@ -1,179 +0,0 @@ -.deep-learning { - .header-container { - @include max-width-desktop { - margin-bottom: 1rem; - } - } - - .jumbotron { - height: 180px; - @include desktop { - height: 250px; - } - - .thank-you-page-container { - margin-top: 0; - @include small-desktop { - margin-top: 250px; - } - } - - .deep-learning-jumbotron-text { - @include desktop { - margin-top: 55px; - - h1 { - padding-top: 30px; - } - } - @include small-desktop { - max-width: 95%; - flex-basis: 100%; - } - } - - .deep-learning-thank-you-text { - width: 80%; - .download-book-link { - display: inline-block; - } - } - - .deep-learning-landing-text { - width: 100%; - @include desktop { - width: 85% - } - } - - .deep-learning-book-container { - display: none; - @include desktop { - display: block - } - @include small-desktop { - display: none; - } - } - - .thank-you-book-container { - display: none; - @include small-desktop { - display: block; - } - @include desktop { - display: block; - } - } - } - - .deep-learning-col { - @include desktop { - max-width: 80%; - } - } - - .deep-learning-background { - @include desktop { - height: 440px; - } - } - 
- .header-holder { - @include desktop { - height: 90px; - } - } -} - -.deep-learning { - .main-content-wrapper { - margin-top: 250px; - @include desktop { - margin-top: 480px; - } - } - - .deep-learning-content { - @include desktop { - padding-top: 0; - } - } - - .main-background { - height: 250px; - @include desktop { - height: 440px - } - } - - .thank-you-wrapper { - margin-top: 400px; - @include desktop { - margin-top: 275px; - } - } - - .thank-you-background { - height: 438px; - @include desktop { - height: 680px; - } - } -} - -.deep-learning-container { - display: flex; - align-items: center; -} - -.deep-learning-logo { - background-image: url($baseurl + "/assets/images/pytorch-logo.png"); -} - -.deep-learning-row { - display: flex; - align-items: center; - .lead { - margin-top: 1rem; - margin-bottom: 2rem; - } - h1 { - @include small-desktop { - font-size: 3rem; - } - @include desktop { - margin-top: 2rem; - } - } -} - -.deep-learning-book { - max-width: 100%; - height: 400px; -} - -.deep-learning-form { - margin-left: -1rem; - @include desktop { - margin-left: 0; - margin-top: 1rem; - } -} - -#deep-learning-button { - margin-top: 2rem; -} - -.deep-learning-form { - .email-subscribe-form { - .deep-learning-input { - padding-left: .5rem; - background-color: #f3f4f7; - } - } - - #mce-error-response { - color: $orange; - } -} diff --git a/_sass/ecosystem.scss b/_sass/ecosystem.scss deleted file mode 100644 index 3c5289a12cdd..000000000000 --- a/_sass/ecosystem.scss +++ /dev/null @@ -1,450 +0,0 @@ -.ecosystem .jumbotron { - height: 170px; - @include desktop { - height: 300px; - } - - h1 { - padding-top: rem(135px); - color: $white; - } - - p.lead { - margin-bottom: rem(25px); - padding-top: rem(20px); - color: $white; - } - - .ecosystem-join { - margin-bottom: rem(48px); - } - - svg { - margin-bottom: rem(20px); - } -} - -.ecosystem .main-content { - @include desktop { - padding-top: 3.25rem; - } -} - -.ecosystem .main-content-wrapper { - background-color: $light_grey; - - margin-top: 340px; - @include desktop { - margin-top: 435px; - } -} - -.ecosystem.ecosystem-detail .main-content-wrapper { - background-color: $white; -} - -.ecosystem-cards-wrapper { - margin-bottom: rem(18px); - padding-top: rem(20px); - .col-md-6 { - @media (min-width: 768px) { - flex: 0 0 100%; - max-width: 100%; - } - - @include max-width-desktop { - flex: 0 0 50%; - max-width: 50%; - } - } -} - -.ecosystem .main-content-menu { - .navbar-nav .nav-link { - font-size: rem(18px); - color: $very_dark_grey; - padding-right: 0; - margin-right: rem(30px); - - &.selected { - color: $orange; - border-bottom: 1px solid $orange; - } - } - - .nav-item:last-of-type { - @include desktop { - position: absolute; - right: 0; - a { - margin-right: 0; - } - } - } -} - -.ecosystem.ecosystem-detail .main-content { - padding-bottom: 0; -} - -.ecosystem article.pytorch-article { - counter-reset: article-list; - - > ol { - padding-left: 0; - list-style-type: none; - } - - > ol > li { - @include max-width-desktop { - position: relative; - - &:before { - counter-increment: article-list; - content: counter(article-list, decimal-leading-zero); - color: #B932CC; - line-height: rem(40px); - letter-spacing: -0.34px; - font-size: rem(32px); - font-weight: 300; - position: absolute; - left: -60px; - top: -16px; - padding: rem(10px) 0; - background-color: $white; - z-index: 10; - } - - &:after { - content: ""; - width: 2px; - position: absolute; - left: -42px; - top: 0; - height: 100%; - background-color: #f3f3f3; - z-index: 9; - } - } - - > h4 { 
- color: $slate; - } - - ul li { - list-style-type: disc; - } - } -} - -.ecosystem .quick-starts { - background: #ecedf1; - - .title-block, - #command, - .option, - .cloud-option { - border-color: #ecedf1; - } -} - -.ecosystem { - .join-link { - color: inherit; - text-decoration: underline; - } - - .join-notice { - text-align: center; - padding-top: rem(20px); - padding-bottom: rem(40px); - - p { - color: $dark_grey; - margin-bottom: 0; - line-height: rem(30px); - } - } - - .join-jumbotron { - @include desktop { - height: 262px; - } - width: 90%; - - .container { - max-width: 920px; - } - - h1 { - padding-top: rem(5px); - color: $white; - span { - font-weight: 300; - } - } - } - - .join-wrapper { - background-color: $light_grey; - - .main-content { - @include desktop { - padding-top: 1.5rem; - } - } - - .container { - max-width: 920px; - } - - #success-response { - color: $dark_grey; - } - } - - .join-intro { - color: $dark_grey; - line-height: 28px; - } - - .requirements { - - span { - color: $black; - font-weight: bold; - } - - .join-number { - color: $purple; - display: flex; - align-items: center; - @include desktop { - padding-left: rem(10px); - } - } - - p { - margin-bottom: 0; - margin-top: rem(-7px); - @include desktop { - padding-left: rem(24px); - } - } - - .col-md-11 { - @include desktop { - border-left: 2px solid $light_grey; - } - } - } - - .row.requirements { - padding-bottom: rem(40px); - } -} - -.ecosystem .experimental { - .ecosystem-card-title-container { - display: inline-flex; - .experimental-badge { - text-transform: uppercase; - margin-left: 15px; - background-color: #e4e4e4; - color: $not_quite_black; - opacity: 0.75; - font-size: rem(10px); - letter-spacing: 1px; - line-height: rem(22px); - height: rem(20px); - width: rem(96px); - text-align: center; - margin-top: rem(4px); - } - } -} - -.ecosystem { - .ecosystem-card-title-container { - .card-title { - padding-left: 0; - font-size: 1.5rem; - color: $slate; - } - } - - .star-list { - list-style: none; - padding-left: 0; - li { - display: inline; - } - li.github-stars-count-whole-number { - display: none; - } - } - - - .icon-count-container { - display: inline-block; - vertical-align: text-bottom; - margin-left: rem(8px); - } - - .github-logo { - height: 15px; - width: 13px; - margin-left: 10px; - } - - .github-stars-count { - color: $mid_gray; - position: relative; - top: rem(4px); - font-size: 14px; - margin-left: 0.125rem; - @include desktop { - top: rem(3px); - font-size: initial; - } - } -} - -.ecosystem-divider { - position: relative; - margin-bottom: 4rem; - margin-top: 1.5rem; - top: 3rem; -} - -.ecosystem{ - #dropdownSort, #dropdownSortLeft { - margin-left: 0; - } -} - -.ecosystem{ - #dropdownSortLeft { - font-size: 19px; - top: inherit; - right: inherit; - } -} - -.ecosystem-filter-menu { - ul { - list-style-type: none; - padding-left: rem(20px); - li { - padding-right: rem(20px); - word-break: break-all; - - a { - color: $mid_gray; - &:hover { - color: $orange; - } - } - } - } -} - -.ecosystem .ecosystem-filter { - cursor: pointer; - ul { - list-style-type: none; - } -} - -.ecosystem #dropdownFilter, #dropdownSort, #dropdownSortLeft { - color: $mid_gray; - cursor: pointer; - z-index: 1; - position: absolute; -} - -.ecosystem .pagination { - .page { - border: 1px solid #dee2e6; - padding: 0.5rem 0.75rem; - } - - .active .page { - background-color: #dee2e6; - } -} - -.ecosystem-form { - .hbspt-form { - padding-bottom: rem(48px); - - .hs-form-field { - width: 100%; - } - - .hs-form-field .input input { - 
width: 100%; - border: none; - border-bottom: 2px solid $purple; - height: rem(44px); - outline: none; - padding-left: rem(15px); - margin-bottom: rem(30px); - } - - .hs-richtext h3 { - text-transform: uppercase; - padding-top: rem(25px); - padding-bottom: rem(30px); - } - - label { - color: $dark_grey; - } - - textarea { - width: 100%; - border: none; - border-bottom: 2px solid $purple; - outline: none; - padding-left: rem(15px); - margin-bottom: rem(30px); - height: rem(90px); - padding-top: rem(10px); - } - - ::placeholder { - color: $dark_grey; - opacity: 0.5; - ; - } - - .actions { - display: flex; - width: 100%; - justify-content: center; - } - - .hs-button { - @include desktop { - padding-left: rem(18px); - background-origin: content-box; - background-size: 30px 15px; - } - padding-left: rem(12px); - margin-top: rem(40px); - background-color: $orange; - color: $white; - cursor: pointer; - border: none; - width: 30%; - height: rem(45px); - text-align: left; - background-repeat: no-repeat; - background-image: url(/assets/images/arrow-right-with-tail-white.svg); - background-size: 30px 12px; - background-position: right; - } - - } -} \ No newline at end of file diff --git a/_sass/enterprise.scss b/_sass/enterprise.scss deleted file mode 100644 index 5b9f79fac1e3..000000000000 --- a/_sass/enterprise.scss +++ /dev/null @@ -1,35 +0,0 @@ -.mobile .enterprise-jumbotron { - height: 210px; - @include desktop { - height: 280px; - } -} -.enterprise { - padding-bottom: 0; - p, li { - color: $content_text_color; - font-size: 18px; - } - h2 { - padding-bottom: 1.5rem; - } - .container { - padding: 48px 30px 48px 30px; - } - .enterprise-gray-container { - background-color: $light_grey; - } - .pyt-enterprise-logo { - background-image: url($baseurl + "/assets/images/PTE_lockup_PRIMARY.svg"); - background-repeat: no-repeat; - height: 60px; - } - .container { - max-width: 940px; - } - .enterprise-landing-azure-logo-container { - float: left; - padding: 0; - } -} - diff --git a/_sass/events.scss b/_sass/events.scss deleted file mode 100644 index 18e89c238ca2..000000000000 --- a/_sass/events.scss +++ /dev/null @@ -1,356 +0,0 @@ -.ecosystem { - .events-wrapper { - background-color: white; - @include desktop { - margin-top: 472px; - } - } - .events { - padding-top: 0; - .event-info-container { - display: flex; - flex-flow: column; - } - .sticky-top { - top: 15%; - } - .event-label { - margin-bottom: 2rem; - } - } - .live-event-container { - display: flex; - @media (max-width: 767px) { - flex-flow: wrap; - } - } - .events-section { - max-width: 920px; - margin: 0 auto 0 auto; - padding: 0 30px 43px 30px; - width: 90%; - .event-item { - padding-bottom: 3rem; - border-bottom: 1px solid #D6D7D8; - h2 { - padding-bottom: 1rem; - } - } - } - .community-event { - margin: 0; - padding: 3px 10px; - border: 1px solid #8c8c8c; - border-radius: 3px; - text-transform: uppercase; - font-size: 14px; - font-weight: 700; - color: #8c8c8c; - } - .event-side-nav-container { - padding-left: 3rem; - ul { - list-style: none; - } - } - .live-events-section { - p { - font-size: 18px; - margin-top: 2rem; - } - @include small-desktop { - width: 100%; - padding-left: 5px; - padding-right: 5px; - } - @media (max-width: 767px) { - width: 100%; - padding-left: 5px; - padding-right: 5px; - } - } -} -.ecosystem .events.main-content { - padding-top: 0; -} - -.events-container-fluid { - height: 5rem; - width: 100%; - padding-bottom: 7rem; - @media screen and (max-width: 767px) { - margin-top: 2rem; - } - @include full-nav-menu-desktop { - 
margin-left: 0; - } -} - -.events-container { - max-width: 920px; - left: 0; - right: 0; - margin-left: auto; - margin-right: auto; - padding-left: 0px; - padding-right: 0px; - width: 90%; -} - - - -.ecosystem .events.main-content .navbar { - padding-left: 0; - padding-bottom: 0; - padding-top: 0; - .nav-item { - cursor: pointer; - &:last-of-type { - position: relative; - } - } - @media (min-width: 992px) { - .nav-item { - padding: .5rem; - cursor: pointer; - } - - .nav-link { - position: relative; - top: 10%; - transform: translateY(-50%); - } - } - - .nav-select { - background-color: $white; - .nav-link { - color: $orange; - font-weight: 500; - } - } - - .nav-link { - font-size: rem(18px); - color: #8c8c8c; - @include desktop { - margin-left: rem(30px); - } - &:hover { - color: $orange; - } - } - - .events-nav-link { - padding-left: rem(15px); - padding-right: rem(5px); - - @include desktop { - padding-left: rem(20px); - padding-right: rem(20px); - } - } - - .events-nav { - flex-direction: row; - } - - .nav-item { - padding-top: rem(15px); - padding-bottom: rem(15px); - @include desktop { - padding-bottom: 0; - padding-top: 2rem; - } - @include small-desktop { - padding-bottom: 0; - padding-top: 2rem; - } - @media (max-width: 990px) { - padding-bottom: rem(10px); - padding-top: 1rem; - } - } - - .navbar-toggler { - margin-left: rem(40px); - } -} - -.events-video-wrapper { - width: 100%; - border: 1px solid $mid_gray; - background-color: $light_grey; - height: 21rem; - margin-top: 2.5rem; - .video-container { - display: flex; - top: 12%; - } - .video-tabs { - display: flex; - } - .events-video-nav { - flex-direction: row; - padding-right: 0; - margin-bottom: 1rem; - .nav-item { - border-right: 1px solid $mid_gray; - border-bottom: 1px solid $mid_gray; - } - .nav-select { - background-color: $white; - border-bottom: none; - .nav-link { - color: $orange; - } - } - } - .events-nav-link { - text-align: center; - } - .video { - position: relative; - height: 0; - padding-bottom: 30%; - place-self: center; - } - .video-info { - margin-left: 3rem; - max-width: 45%; - } - iframe { - height: 100%; - width: 100%; - position: absolute; - } -} -.video-links-container { - border: 1px solid $mid_gray; - .video-links { - display: flex; - .video-link-item { - padding-left: 1rem; - list-style: none; - } - } -} -.episode-header-text { - font-size: 26px; - margin-bottom: 2rem; -} -.episode-card-row { - display: block; - @media screen and (min-width: 908px) { - display: flex; - flex-wrap: wrap; - margin-bottom: 2rem; - } - .episode-card.resource-card { - height: 14rem; - margin-right: 1rem; - margin-bottom: 1rem; - background-color: $light_grey; - border: none; - max-width: 31%; - flex: auto; - ul { - list-style: none; - } - a{ - color: inherit; - } - .episode-body { - display: block; - position: relative; - top: 30px; - margin-left: 20px; - } - - .episode-title { - margin-left: 3.2rem; - margin-bottom: .5rem; - font-size: rem(24px); - @include desktop { - margin-left: 2.5rem; - } - } - - .guest-name { - font-weight: 500; - font-size: rem(20px); - overflow: hidden; - white-space: nowrap; - text-overflow: ellipsis; - } - - .episode-info { - display: flex; - justify-content: space-between; - span { - padding-left: 5px; - padding-right: 5px; - } - } - .info-divide { - display: block; - border-bottom: 1px solid #D6D7D8; - margin-top: .5rem; - margin-bottom: .5rem; - } - .episode-poster { - color: $orange; - } - .episode-date-time { - display: flex; - padding-left: 0; - span { - padding-left: 5px; - padding-right: 
5px; - } - } - @media screen and (max-width: 907px) { - max-width: 100%; - margin-bottom: 1.25rem; - } - } - .episode-card.resource-card.pytorch-resource:before { - content: ""; - background-size: 32px 32px; - background-repeat: no-repeat; - display: block; - position: absolute; - height: 32px; - width: 32px; - top: 30px; - left: 15px; - @include desktop { - left: 30px; - top: 30px; - } - } -} - -.podcast-container { - padding-left: 0; - @include desktop { - display: flex; - .podcast-card:not(:first-of-type) { - margin-left: 1rem; - } - } - .podcast-card { - display: flex; - align-items: center; - justify-content: center; - margin-top: 2rem; - border: 1px solid #D6D7D8; - height: rem(140px); - @include animated_border_hover_state; - } - .podcast-title { - font-size: 24px; - font-weight: 400; - } - -} diff --git a/_sass/features.scss b/_sass/features.scss deleted file mode 100644 index 3d77df1e3326..000000000000 --- a/_sass/features.scss +++ /dev/null @@ -1,188 +0,0 @@ -.features { - .main-content { - padding-bottom: 0; - } - .navbar-nav .nav-link { - color: $black; - } - - .nav-logo { - background-image: url($baseurl + "/assets/images/logo-dark.svg"); - } - - .main-background { - @include desktop { - height: 575px - } - } -} - -.features { - .main-content-wrapper { - margin-top: 350px; - @include desktop { - margin-top: 540px; - } - } -} - -.features-row { - padding-bottom: rem(60px); - align-items: center; - - &:first-of-type { - margin-top: rem(20px); - } - - &:last-of-type { - padding-bottom: rem(72px); - } - - @include desktop { - padding-bottom: rem(96px); - &:first-of-type { - margin-top: 4.05rem; - } - } - - h3 { - font-size: rem(32px); - letter-spacing: 1.78px; - line-height: rem(36px); - font-weight: 400; - text-transform: uppercase; - margin-bottom: rem(20px); - font-weight: 300; - - @include small-desktop { - width: 80%; - } - - @include max-width-desktop { - width: 590px; - } - } - - p { - font-size: rem(18px); - letter-spacing: 0.25px; - line-height: rem(28px); - color: $dark_grey; - padding-right: rem(30px); - - @include small-desktop { - width: 80%; - } - - @include max-width-desktop { - width: 590px; - } - } - - .feature-content-holder { - width: 100%; - - @include max-width-desktop { - width: 495px; - } - - pre.highlight { - margin-bottom: 0; - } - } - - &:nth-child(odd) { - .col-md-6:nth-child(1n) { - order: 2; - } - - .col-md-6:nth-child(2n) { - order: 1; - } - - @include desktop { - .col-md-6:nth-child(1n) { - order: 1; - } - - .col-md-6:nth-child(2n) { - order: 2; - } - } - } - - &:nth-child(1n) { - h3 { - color: #B73BC9; - } - - .feature-content-holder { - border-bottom: 2px solid #B73BC9; - } - } - - &:nth-child(2n) { - h3 { - color: #D92F4C; - } - - .feature-content-holder { border-bottom: 2px solid #D92F4C; - } - } - - &:nth-child(3n) { - h3 { - color: #8038E0; - } - - .feature-content-holder { - border-bottom: 2px solid #8038E0; - } - } - - .col-md-6 { - @include max-width-desktop { - padding-left: 0; - padding-right: 0; - } - - @include desktop { - &:nth-of-type(2) { - .feature-content { - width: 100%; - - h3, p, .feature-content-holder { - float: right; - } - } - } - } - } -} - -.features .jumbotron { - height: 200px; - @include desktop { - height: 195px; - } - @media (max-width: 320px) { - height: 250px; - } - h1 { - padding-top: rem(30px); - } - @include desktop { - height: 468px; - h1 { - padding-top: 0; - } - } - h1, p { - color: $white; - } - .btn { - @include desktop { - margin-top: rem(6px); - } - } -} diff --git a/_sass/fonts.scss b/_sass/fonts.scss 
deleted file mode 100644 index 61c8cd48e86f..000000000000 --- a/_sass/fonts.scss +++ /dev/null @@ -1,111 +0,0 @@ -@font-face { - font-family: FreightSans; - font-weight: 700; - font-style: normal; - src: url($baseurl + "/assets/fonts/FreightSans/freight-sans-bold.woff2") format("woff2"), - url($baseurl + "/assets/fonts/FreightSans/freight-sans-bold.woff") format("woff"); -} - -@font-face { - font-family: FreightSans; - font-weight: 700; - font-style: italic; - src: url($baseurl + "/assets/fonts/FreightSans/freight-sans-bold-italic.woff2") format("woff2"), - url($baseurl + "/assets/fonts/FreightSans/freight-sans-bold-italic.woff") format("woff"); -} - -@font-face { - font-family: FreightSans; - font-weight: 500; - font-style: normal; - src: url($baseurl + "/assets/fonts/FreightSans/freight-sans-medium.woff2") format("woff2"), - url($baseurl + "/assets/fonts/FreightSans/freight-sans-medium.woff") format("woff"); -} - -@font-face { - font-family: FreightSans; - font-weight: 500; - font-style: italic; - src: url($baseurl + "/assets/fonts/FreightSans/freight-sans-medium-italic.woff2") format("woff2"), - url($baseurl + "/assets/fonts/FreightSans/freight-sans-medium-italic.woff") format("woff"); -} - -@font-face { - font-family: FreightSans; - font-weight: 100; - font-style: normal; - src: url($baseurl + "/assets/fonts/FreightSans/freight-sans-light.woff2") format("woff2"), - url($baseurl + "/assets/fonts/FreightSans/freight-sans-light.woff") format("woff"); -} - -@font-face { - font-family: FreightSans; - font-weight: 100; - font-style: italic; - src: url($baseurl + "/assets/fonts/FreightSans/freight-sans-light-italic.woff2") format("woff2"), - url($baseurl + "/assets/fonts/FreightSans/freight-sans-light-italic.woff") format("woff"); -} - -@font-face { - font-family: FreightSans; - font-weight: 400; - font-style: italic; - src: url($baseurl + "/assets/fonts/FreightSans/freight-sans-book-italic.woff2") format("woff2"), - url($baseurl + "/assets/fonts/FreightSans/freight-sans-book-italic.woff") format("woff"); -} - -@font-face { - font-family: FreightSans; - font-weight: 400; - font-style: normal; - src: url($baseurl + "/assets/fonts/FreightSans/freight-sans-book.woff2") format("woff2"), - url($baseurl + "/assets/fonts/FreightSans/freight-sans-book.woff") format("woff"); -} - -@font-face { - font-family: IBMPlexMono; - font-weight: 600; - font-style: normal; - unicode-range: u+0020-007f; - src: local("IBMPlexMono-SemiBold"), - url($baseurl + "/assets/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2") - format("woff2"), - url($baseurl + "/assets/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff") - format("woff"); -} - -@font-face { - font-family: IBMPlexMono; - font-weight: 500; - font-style: normal; - unicode-range: u+0020-007f; - src: local("IBMPlexMono-Medium"), - url($baseurl + "/assets/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2") - format("woff2"), - url($baseurl + "/assets/fonts/IBMPlexMono/IBMPlexMono-Medium.woff") - format("woff"); -} - -@font-face { - font-family: IBMPlexMono; - font-weight: 400; - font-style: normal; - unicode-range: u+0020-007f; - src: local("IBMPlexMono-Regular"), - url($baseurl + "/assets/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2") - format("woff2"), - url($baseurl + "/assets/fonts/IBMPlexMono/IBMPlexMono-Regular.woff") - format("woff"); -} - -@font-face { - font-family: IBMPlexMono; - font-weight: 300; - font-style: normal; - unicode-range: u+0020-007f; - src: local("IBMPlexMono-Light"), - url($baseurl + "/assets/fonts/IBMPlexMono/IBMPlexMono-Light.woff2") - format("woff2"), 
- url($baseurl + "/assets/fonts/IBMPlexMono/IBMPlexMono-Light.woff") - format("woff"); -} diff --git a/_sass/footer.scss b/_sass/footer.scss deleted file mode 100644 index 0433b1a0e446..000000000000 --- a/_sass/footer.scss +++ /dev/null @@ -1,511 +0,0 @@ -.site-footer { - padding: rem(60px) 0; - width: 100%; - background: $black; - background-size: 100%; - margin-left: 0; - margin-right: 0; - - @include desktop { - position: absolute; - left: 0; - bottom: 0; - height: $desktop_footer_height; - } - - p { - color: $white; - } - - ul { - list-style-type: none; - padding-left: 0; - margin-bottom: 0; - } - - ul li { - font-size: rem(18px); - line-height: rem(32px); - color: #A0A0A1; - padding-bottom: rem(6px); - - &.list-title { - padding-bottom: rem(12px); - color: $white; - p { - margin-bottom: 0; - } - } - } - - a:link, - a:visited { - color: inherit; - } - - @include desktop { - a:hover { - color: $orange; - } - } - - .privacy-policy { - background: #000000; - border-top: 1px solid #fff; - display: flex; - flex-direction: column; - margin-top: 40px; - ul { - border-bottom: 1px solid white; - .privacy-policy-links { - padding-bottom: 1rem; - padding-top: 1rem; - padding-right: 1rem; - display: inline-flex; - color: white; - } - } - .copyright { - padding-top: 1rem; - p { - color: #dfdfdf; - font-size: 14px; - } - a { - color: #dfdfdf; - font-weight: 600; - &:hover { - color: #dfdfdf; - font-weight: 600; - } - } - } - } -} - -.docs-tutorials-resources { - background-color: $slate; - color: $white; - padding-top: rem(40px); - padding-bottom: rem(40px); - - @include desktop { - padding-top: rem(66px); - padding-bottom: 4.09rem; - } - - h2 { - font-size: rem(24px); - letter-spacing: -0.25px; - text-transform: none; - margin-bottom: 0.25rem; - - @include desktop { - margin-bottom: rem(20px); - } - } - - .col-md-4 { - margin-bottom: rem(32px); - @include desktop { - margin-bottom: 0; - } - } - - .with-right-arrow { - margin-left: 12px; - background-position: top 3px right 11px; - - @include desktop { - background-position: top 6px right 11px; - } - - &:hover { - background-image: url($baseurl + "/assets/images/chevron-right-white.svg"); - } - } - - p { - font-size: rem(16px); - line-height: rem(24px); - letter-spacing: 0.22px; - color: #A0A0A1; - margin-bottom: rem(8px); - - @include desktop { - margin-bottom: rem(20px); - } - } - - a { - font-size: rem(18px); - color: $orange; - &:hover { - color: $white; - } - } -} - -.footer-container { - position: relative; -} - -.footer-logo-wrapper { - display: none; - @include desktop { - display: flex; - grid-column: span 6; - } - .footer-logo { - img { - width: 40px; - } - } -} - - -.footer-links-wrapper { - display: flex; - flex-wrap: wrap; - padding-bottom: 1rem; - border-bottom: 1px solid white; - - @include desktop { - flex-wrap: initial; - justify-content: flex-end; - } -} - -.footer-links-col { - margin-bottom: rem(60px); - width: 50%; - - @include desktop { - margin-bottom: 0; - width: 14%; - margin-right: 23px; - - &.follow-us-col { - width: 18%; - margin-right: 0; - } - } - - @include small-desktop { - width: 18%; - margin-right: 30px; - } -} - -.footer-social-icons { - margin: rem(137px) 0 rem(40px) 0; - - a { - height: 32px; - width: 32px; - display: inline-block; - background-color: $very_dark_grey; - border-radius: 50%; - margin-right: 5px; - - &.facebook { - background-image: url($baseurl + "/assets/images/logo-facebook-dark.svg"); - background-position: center center; - background-size: 9px 18px; - background-repeat: no-repeat; - } - - 
&.twitter { - background-image: url($baseurl + "/assets/images/logo-twitter-dark.svg"); - background-position: center center; - background-size: 17px 17px; - background-repeat: no-repeat; - } - - &.youtube { - background-image: url($baseurl + "/assets/images/logo-youtube-dark.svg"); - background-position: center center; - background-repeat: no-repeat; - } - } -} - -.site-footer { - .mc-field-group { - margin-top: -2px; - } -} - -.site-footer { - .email-subscribe-form input[type="submit"]{ - top: 9px; - @include desktop { - top: 13px; - } - } -} - -.social-links { - grid-column: span 12; - - @media (min-width: 600px) { - grid-column: span 8; - } - - @include desktop { - grid-column: span 6; - align-self: end; - } - - @media (max-width: 999px) { - margin-left: 10px; - margin-right: 10px; - } - - display: grid; - grid-column-gap: 3%; - grid-row-gap: 30px; - grid-template-columns: repeat(6, minmax(0, 1fr)); - - li { - text-align: center; - } -} - -.social-links { - svg { - height: 25px; - max-width: 30px; - fill: #fff; - color: #fff; - &:hover { - fill: #ee4c2c; - color: #ee4c2c; - } - } -} - -.lf-grid { - grid-column-gap: 3%; - grid-row-gap: 30px; - display: grid; - grid-template-columns: repeat(12,1fr); -} - -// removes captcha image from flow. -.hs-recaptcha { - display: none; -} - -.newsletter { - line-height: 140%; - margin-bottom: 80px; - - &__title { - line-height: 140%; - font-size: 24px; - @media (min-width: 1000px) { - font-size: 40px; - } - } - - .legal-consent-container { - display: none; - } - - p.newsletter__privacy { - max-width: 860px; - margin-top: 30px; - line-height: 21px; - font-size: 14px; - color: #dfdfdf; - a { - color: #dfdfdf; - font-weight: 600; - &:hover { - color: #dfdfdf; - font-weight: 600; - } - } -} - - // form container. - .hbspt-form { - min-height: 300px; - @media (min-width: 500px) { - min-height: 100px; - } - @media (min-width: 1000px) { - min-height: 20px; - } - - // Displays if required field not filled. - .hs-error-msg { - display: block; - margin-right: 8px; - color: $orange; - font-size: 14px; - line-height: 1.1em; - width: 95%; - padding-top: 15px; - } - - // form inputs wrapper. - .hs-form { - display: grid; - grid-template-columns: 1fr; - grid-gap: 30px; - - @media (min-width: 500px) { - grid-template-columns: minmax(0, 1fr) minmax(0, 1fr); - } - - @media (min-width: 700px) { - grid-template-columns: repeat(3, minmax(0, 1fr)); - } - - @media (min-width: 950px) { - grid-template-columns: 1fr 1fr 1fr 1fr 1fr; - grid-row-gap: 1.5rem; - grid-column-gap: 1.5rem; - } - - input[type='text'], - input[type='email'] { - height: 50px; - @media (min-width: 500px) { - height: 42px; - } - width: 100%; - background: transparent; - border: none; - border-bottom: 2px solid $white; - border-radius: 0; - transition: all 0.25s ease; - color: $white; - font-size: 16px; - @media (min-width: 500px) { - font-size: 20px; - } - line-height: 105%; - &::placeholder { - color: $white; - font-size: 16px; - @media (min-width: 500px) { - font-size: 20px; - } - line-height: 105%; - } - &:focus { - outline: 0; - border-bottom: 2px solid $orange; - transition: color 0.25s ease; - &::placeholder { - transition: color 0.25s ease; - color: transparent; - } - } - } - - // Controls autocomplete styles. 
- input, - textarea, - select { - &:-webkit-autofill, - &:-webkit-autofill:hover, - &:-webkit-autofill:focus { - -webkit-text-fill-color: $white; - } - } - - select { - appearance: none; - background: transparent; - border: 0px solid transparent; - border-bottom: 2px solid $white; - border-radius: 0; - box-shadow: 0 1px 0 1px rgba(0, 0, 0, 0); - display: block; - height: 50px; - @media (min-width: 500px) { - height: 42px; - } - margin: 0; - max-width: 100%; - padding: 0.25em 0 calc(0.25em + 1px) 5px; - transition: all 0.25s ease; - width: 100%; - color: $white; - font-size: 16px; - @media (min-width: 500px) { - font-size: 20px; - } - line-height: 105%; - - &::-ms-expand { - display: none; - } - - &:focus { - outline: 0; - border-bottom: 2px solid $orange; - &::placeholder { - transition: color 0.4s ease; - color: transparent; - } - } - - option { - font-weight: normal; - color: black; - } - } - - .hs-button { - border-radius: 5px; - margin-top: 20px; - border: none; - background-color: $orange; - color: $white; - font-weight: 400; - padding: 11px 40px; - font-size: 16px; - font-weight: 700; - text-decoration: none; - } - - // underline errors. - .hs-input.invalid { - border-bottom: 2px dashed red !important; - } - - // hide general error message. - .hs_error_rollup { - display: none; - } - } - } - - // success message for newsletter footer only. - .submitted-message { - display: flex; - align-content: center; - align-items: center; - justify-content: center; - border: 2px solid $white; - min-height: 280px; - font-size: 18px; - padding: 20px 20px 0; - line-height: 1.1em; - @media (min-width: 500px) { - min-height: 80px; - } - @media (min-width: 1000px) { - min-height: unset; - } - } - - .submitted-message p { - max-width: none; - } -} diff --git a/_sass/get-started.scss b/_sass/get-started.scss deleted file mode 100644 index ca3c335c41fe..000000000000 --- a/_sass/get-started.scss +++ /dev/null @@ -1,320 +0,0 @@ -.get-started article { - margin-bottom: rem(80px); -} - -.get-started .quick-start-guides { - ul { - margin-bottom: 0; - padding-left: 0; - } -} - -.get-started .main-content-wrapper { - margin-top: 275px; - @include desktop { - margin-top: 260px + $desktop_header_height; - } -} - -.get-started .jumbotron { - height: 190px; - @include desktop { - height: 260px; - } -} - -.get-started .main-content .navbar { - background-color: #f3f4f7; - - padding-left: 0; - padding-bottom: 0; - padding-top: 0; - - @media (min-width: 992px) { - li:first-of-type { - padding-left: rem(55px); - } - - .nav-item { - padding: 1rem; - cursor: pointer; - } - - .nav-link { - position: relative; - top: 10%; - transform: translateY(-50%); - } - } - - .nav-select { - background-color: $white; - .nav-link { - color: $orange; - font-weight: 500; - } - } - - .nav-link { - font-size: rem(18px); - color: #8c8c8c; - - &:hover { - color: $orange; - } - } - - .get-started-nav-link { - padding-left: rem(20px); - padding-right: rem(20px); - - @include desktop { - padding-left: rem(30px); - padding-right: rem(30px); - } - } - - .nav-item { - padding-top: rem(15px); - padding-bottom: rem(15px); - @include desktop { - padding-bottom: 0; - padding-top: 2rem; - } - @include small-desktop { - padding-bottom: 0; - padding-top: 2rem; - } - @media (max-width: 990px) { - padding-bottom: rem(10px); - padding-top: 1rem; - } - } - - .navbar-toggler { - margin-left: rem(40px); - } -} - -.get-started .main-content { - padding-top: 0; - @include desktop { - padding-top: 1.9rem; - } -} - -.get-started .quick-start-module { - 
padding-bottom: 0; - padding-top: 0; - background-color: $white; - - .option, - #command { - border: 2px solid $white; - background: $light_grey; - } - - .title-block { - border: 2px solid $white; - } - - .selected { - background-color: $orange; - } - - h1 { - font-size: rem(32px); - letter-spacing: 1.78px; - line-height: rem(40px); - text-transform: uppercase; - margin-bottom: rem(24px); - } -} - -.get-started .nav-menu-wrapper { - background-color: $light_grey; - .container { - padding-left: 0; - padding-right: 0; - @include desktop { - padding-left: 30px; - padding-right: 30px; - } - } -} - -.get-started .navbar-nav { - flex-direction: row; -} - -#installation .os { - display: none; -} - -#installation .selected { - display: block; -} - -#cloud .platform { - display: none; -} - -#cloud .selected { - display: block; -} - -.screencast { - iframe { - width: 100% !important; - } - display: none; -} - -.get-started { - .quick-starts { - .row.ptbuild, - .row.os, - .row.package, - .row.language, - .row.cuda { - margin-bottom: rem(20px); - @include desktop { - margin-bottom: 0; - } - } - - @include small-desktop { - flex: 0 0 100%; - max-width: 100%; - } - - @include desktop { - margin-bottom: rem(40px); - - .row { - margin-bottom: 0; - } - } - - @include max-width-desktop { - margin-bottom: 0; - } - } -} - -.get-started .get-started-locally-sidebar { - padding-top: rem(40px); - padding-bottom: rem(40px); - top: 15%; - z-index: 385; - - @include desktop { - padding-top: 0; - max-height: 100vh; - overflow: auto; - } - - ul { - padding-left: 0; - } - - li { - list-style-type: none; - line-height: 36px; - - a { - color: #8c8c8c; - &.active, - &:hover { - color: $orange; - } - } - - .subitem { - padding-left: rem(20px); - } - } - - li.subitem { - padding-left: rem(20px); - } -} - -.cloud-nav { - display: none; -} - -.get-started .get-started-cloud-sidebar { - padding-top: rem(50px); - padding-bottom: rem(40px); - top: 15%; - - ul { - padding-left: 0; - } - - li { - list-style-type: none; - line-height: 36px; - - a { - color: #8c8c8c; - &.active, - &:hover { - color: $orange; - } - } - - .subitem { - padding-left: rem(20px); - } - } - - li.subitem { - padding-left: rem(20px); - } -} - -.pytorch-2 .article-wrapper article.pytorch-article table tr td:first-of-type { - padding-left: 10px; -} - -.pytorch-2 .article-wrapper article.pytorch-article { - table,td{ - border: 1px solid #A0A0A1; - padding: 10px; - } - - b, em, h3, h2, p, a, strong, td, tr { - font-family: Verdana; - } - - ul, ol { - margin: 1.5rem 0 1.5rem 0; - - li { - font-family: Verdana; - } - } - - code { - font-family: IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace; - padding: 2px; - color: inherit; - background-color: #f1f1f1; - } - - p, a { - font-family: Verdana; - word-break: break-word; - strong { - font-family: Verdana; - } - } - - .QnATable { - @media screen and (max-width: 418px) { - max-width: 95vw; - } - } -} diff --git a/_sass/homepage.scss b/_sass/homepage.scss deleted file mode 100644 index 8ec34e9ca4d9..000000000000 --- a/_sass/homepage.scss +++ /dev/null @@ -1,393 +0,0 @@ -.homepage { - - .main-content-wrapper { - margin-top: 315px; - @include desktop { - margin-top: 472px; - } - } - h2 { - margin-bottom: rem(25px); - text-transform: uppercase; - letter-spacing: 1.78px; - line-height: rem(40px); - - @include desktop { - margin-bottom: rem(33px); - } - } - - h3 { - font-size: rem(24px); - letter-spacing: 1.33px; - line-height: rem(32px); - text-transform: uppercase; - 
margin-bottom: rem(20px); - } - - h5 { - margin-bottom: rem(8px); - @include desktop { - margin-bottom: rem(15px); - } - } - - .jumbotron { - height: 195px; - @include desktop { - height: 395px; - } - .btn { - margin-top: rem(6px); - } - } - - .ecosystem-row { - .card { - background-color: $light_grey; - } - } - - .homepage-header { - background-color: rgba(0, 0, 0, 0.165); - } -} - -.homepage-feature-module { - padding-top: rem(40px); - padding-bottom: rem(40px); - - @include desktop { - padding-top: rem(62px); - padding-bottom: rem(72px); - - .module-button { - position: absolute; - right: 15px; - top: 0; - } - } - - p { - color: $dark_grey; - font-size: 1.125em; - } - - .title { - color: $black; - font-weight: 300; - font-size: rem(24px); - - @include small-desktop { - font-size: rem(20px); - } - } - - .pytorch-title { - font-size: rem(24px); - letter-spacing: 0.33px; - line-height: rem(36px); - } - - .subtext { - font-size: rem(18px); - color: #8c8c8c; - letter-spacing: 0; - line-height: rem(24px); - - @include small-desktop { - font-size: rem(15px); - } - } -} - -.key-features-module { - padding-bottom: 0; - - @include desktop { - padding-bottom: 1.55rem; - } - - .key-features-boxes { - margin-top: rem(32px); - @include desktop { - margin-top: 0; - } - } - - .key-feature-box { - margin-bottom: rem(32px); - - p { - margin-bottom: 0; - letter-spacing: 0.25px; - } - - @include desktop { - margin-bottom: rem(40px); - } - } -} - -.community-heading { - margin-top: rem(32px); -} - -.community-module { - background-color: $white; - - .ecosystem-card { - height: auto; - - @include small-desktop { - padding: rem(10px); - } - } - - h2 { - margin-bottom: 0; - } - - h5 { - text-transform: uppercase; - color: #c6000a; - margin-bottom: rem(20px); - } - - .h2-subheadline { - margin-top: rem(20px); - margin-bottom: 2.6rem; - - @include desktop { - margin-top: 0; - } - } - - .card-body { - @include small-desktop { - padding: rem(10px); - } - } - - .module-button { - background-color: $light_grey; - } - - p { - margin-bottom: rem(40px); - letter-spacing: 0.25px; - } - - .module-subtext { - margin-right: rem(250px); - } - - .email-subscribe-form input.email { - border-bottom: 1px solid #d6d7d8; - font-size: rem(20px); - line-height: 0; - padding-bottom: rem(12px); - } - - .email-subscribe-form input[type="submit"] { - top: 6px; - @include desktop { - top: 10px; - } - } -} - -.pytorch-users-module, -.homepage-bottom-wrapper { - background-color: $light_grey; -} - -.pytorch-users-module { - @include desktop { - padding-bottom: 1.9rem; - } -} - -.community-avatar { - height: 60px; - width: 60px; -} - -.community-logo-bottom { - height: 200px; - background-color: $light_grey; -} - -.university-testimonials h2 { - margin-bottom: 2.2rem; -} - -.university-testimonials-content { - margin-top: rem(40px); - margin-bottom: 2rem; - - @include desktop { - margin-top: 0; - } - - .col-md-4 { - margin-bottom: rem(40px); - } - - .case-study-title { - font-size: rem(24px); - margin-bottom: rem(20px); - } - - p { - color: $dark_grey; - font-size: 1.125rem; - letter-spacing: 0.25px; - } - - .btn { - background-color: $white; - } -} - -.follow-us-on-twitter h2 { - margin-bottom: rem(20px); - @include desktop { - margin-bottom: rem(40px); - } -} - -.homepage-feature-module .tweets-wrapper p { - font-size: rem(16px); -} - -.quick-starts { - p { - font-size: rem(18px); - line-height: rem(28px); - } -} - -.quick-start-guides { - font-size: rem(24px); - letter-spacing: 0.25px; - line-height: rem(36px); - color: #a5a5a5; - - 
.step-counter { - margin-bottom: rem(3px); - } - - ul { - list-style-type: none; - padding-left: 0; - li { - margin-bottom: 0; - font-size: rem(18px); - - @include desktop { - margin-bottom: rem(12px); - &:last-of-type { - margin-bottom: 0; - } - } - - &.selected { - color: $orange; - &:before { - content: "\2022"; - position: absolute; - left: 0; - @include desktop { - left: -5px; - } - } - } - } - } - - .select-instructions { - color: $slate; - border-bottom: 2px solid #a5a5a5; - margin-bottom: rem(16px); - font-size: rem(18px); - display: inline-block; - @include desktop { - margin-bottom: 0; - } - } -} - -.homepage .news-banner-container { - background: $black; - color: $white; - text-align: center; - padding: 20px; - width: 90%; - - .right-arrow, .left-arrow { - height: 15px; - bottom: -3px; - position: relative; - @include desktop { - bottom: -8px; - } - &:hover { - cursor: pointer; - } - } - .right-arrow { - float: right; - } - .left-arrow { - float: left; - } -} - -.homepage #news-items { - .pagination { - display: none !important; - } -} - -.banner-info { - display: inline-block; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; - margin: auto; - width: 80%; - font-size: rem(18px); - @include desktop { - padding-top: 3px; - } - &:hover { - cursor: pointer; - color: $orange; - } -} - -.news-banner-text { - a { - color: white; - &:hover { - color: $orange; - } - } -} - -.no-banner { - padding-bottom: 2rem; -} - -.homepage-box-module { - div.col-md { - background: #F3F4F7; - margin: 10px; - padding: 30px; - - @include desktop { - margin: 20px; - } - } -} \ No newline at end of file diff --git a/_sass/hub-search.scss b/_sass/hub-search.scss deleted file mode 100644 index 6abaf474501e..000000000000 --- a/_sass/hub-search.scss +++ /dev/null @@ -1,106 +0,0 @@ -.hub .hub-search-wrapper { - @include desktop { - top: 8px; - } - .algolia-autocomplete .ds-dropdown-menu { - min-width: 100%; - max-width: 100% !important; - } - .algolia-autocomplete { - width: 100%; - } - &.active { - width: 100%; - } - span { - font-size: 1.125rem; - text-align: center; - } -} - -.hub #hub-search-icon { - @media (max-width: 480px) { - margin-top: 1rem; - } -} - -#hub-search-icon { - background-image: url($baseurl + "/assets/images/search-icon.svg"); - color: transparent; - opacity: 0.4; - width: 25px; - height: 25px; - margin-left: 3rem; - background-size: 15px 20px; - background-repeat: no-repeat; - right: 10px; - position: absolute; - z-index: 1; - cursor: pointer; - &:hover { - background-image: url($baseurl + "/assets/images/search-icon-orange.svg"); - opacity: 1; - } -} - -#hub-search-input { - background-color: $very_dark_grey; - border: none; - color: $black; - font-size: rem(18px); - font-weight: 300; - line-height: 20px; - outline: none; - position: relative; - display: none; - width: 100%; - border-radius: 5px; - padding: rem(14px) 0 rem(14px) rem(5px); -} - -#hub-close-search { - display: none; - margin-left: 20px; - opacity: 0.4; - right: 10px; - position: absolute; - z-index: 1; - cursor: pointer; - font-size: rem(18px); - @include desktop { - top: rem(18px); - } - &:hover { - color: $orange; - opacity: 1; - } -} - -.hub .hub-divider { - margin-bottom: 2.2rem; - margin-top: 1.5rem; -} - -.hub .active-hub-divider{ - border-color: $orange; -} - -.hub .hub-search-border { - display: flex; - align-items: center; - flex-direction: row; - border: none; - background-color: transparent; - border-radius: 20px; - width: 100%; -} - -.hub .hub-cards-wrapper { - z-index: 1000; -} - -.hub 
.nav-container { - display: flex; - width: 100%; - position: absolute; -} diff --git a/_sass/hub.scss b/_sass/hub.scss deleted file mode 100644 index cf3133e3f012..000000000000 --- a/_sass/hub.scss +++ /dev/null @@ -1,632 +0,0 @@ -.hub .jumbotron { - height: 300px; - @include desktop { - height: 420px; - } - - h1 { - color: $white; - #hub-header, #hub-sub-header { - font-weight: lighter; - } - #hub-sub-header { - } - } - - p.lead, p.hub-release-message { - margin-bottom: rem(25px); - padding-top: rem(25px); - color: $white; - - @include desktop { - width: 77%; - } - } - - p.hub-release-message { - padding-top: 0; - font-style: italic; - } - - svg { - margin-bottom: rem(20px); - } - - p.detail-lead { - padding-top: rem(50px); - color: $mid_gray; - width: 100%; - margin-bottom: 0px; - } - - p.lead-summary { - color: $dark_grey; - } -} - -.hub.hub-index .jumbotron { - height: 280px; - @include desktop { - height: 325px; - } -} - -.hub .detail-github-link { - background: $orange; - color: $white; -} - -.hub .detail-colab-link { - background: $yellow; - color: $black; -} - -.hub .detail-web-demo-link { - background: #4a9fb5; - color: $white; -} - -.hub { - .detail-colab-link, .detail-github-link, .detail-web-demo-link { - margin-top: 1rem; - } -} - -.hub { - .detail-button-container { - margin-top: rem(45px); - @include small-desktop { - margin-top: rem(20px); - } - @media (max-width: 320px) { - margin-top: rem(20px); - } - @media (max-width: 360px) { - margin-top: rem(20px); - } - } -} - -.hub a { - .detail-colab-link, .detail-github-link { - padding-right: rem(50px); - } -} - -.hub .detail-arrow { - color: $orange; - @include desktop { - font-size: 4.5rem; - } - font-size: 2.5rem; -} - -.hub .with-right-white-arrow { - padding-right: rem(32px); - position: relative; - background-image: url($baseurl + "/assets/images/chevron-right-white.svg"); - background-size: 6px 13px; - background-position: top 10px right 11px; - background-repeat: no-repeat; - @include desktop { - background-size: 8px 14px; - background-position: top 15px right 12px; - padding-right: rem(32px); - } -} - -.hub .main-content { - padding-top: rem(140px); - @include desktop { - padding-top: rem(135px); - } - @media (max-width: 320px) { - padding-top: rem(160px); - } -} - -.hub.hub-detail .main-content { - padding-top: rem(200px); - @include desktop { - padding-top: rem(150px); - } -} - -.hub.hub-detail .jumbotron { - height: 350px; - @include desktop { - height: 400px; - } -} - -.hub .main-content-wrapper { - background-color: $light_grey; - - @include desktop { - margin-top: 305px + $desktop_header_height; - } - margin-top: 300px; -} - -.hub-feedback-button { - border: 2px solid #e2e2e2; - color: #A0A0A1; - padding-left: 0; - padding-right: 5rem; - font-size: 1rem; - width: 13rem; - &:after { - bottom: -1px; - } -} - -.hub-flag { - background-image: url($baseurl + "/assets/images/feedback-flag.svg"); - background-size: 15px 20px; - background-position: center right 10px; - background-repeat: no-repeat; -} - -#hub-icons { - height: 2rem; - @media (max-width: 480px) { - position: initial; - padding-left: 0; - padding-top: 1rem; - } -} - -.hub.hub-detail .main-content-wrapper { - @include desktop { - margin-top: 300px + $desktop_header_height; - } - @include small-desktop { - margin-top: 400px + $desktop_header_height; - } - @media (max-width: 320px) { - margin-top: 330px; - } - margin-top: 305px; -} - -.hub .hub-cards-wrapper, .hub-cards-wrapper-right { - margin-bottom: rem(18px); - padding-top: rem(20px); - - .card-body { - 
.card-summary { - width: 75%; - } - .hub-image { - position: absolute; - top: 0px; - right: 0px; - height: 100%; - width: 25%; - img { - height: 100%; - width: 100%; - } - &:before { - content: ''; - position: absolute; - top: 0; - left: 0; - bottom: 0; - right: 0; - z-index: 1; - background: #000000; - opacity: .075; - } - } - } -} - -.hub .github-stars-count { - color: $mid_gray; - position: relative; - top: rem(4px); - font-size: 14px; - @include desktop { - top: rem(3px); - font-size: initial; - } -} - -.hub .github-stars-count-whole-number { - display: none; -} - -.hub .github-logo { - height: 15px; - width: 13px; -} - -.hub .icon-count-container { - display: inline-block; - vertical-align: text-bottom; - margin-left: rem(8px); -} - -.hub .detail-count { - font-size: rem(20px); -} - -.hub .main-stars-container { - display: flex; -} - -.hub .detail-stars-container { - display: inline-flex; - .github-stars-image { - margin-left: 0; - } -} - -.hub .card-body { - .hub-card-title-container { - width: 75%; - display: inline-flex; - max-width: rem(300px); - .experimental-badge { - text-transform: uppercase; - margin-left: rem(15px); - background-color: #e4e4e4; - color: $not_quite_black; - opacity: 0.75; - font-size: rem(10px); - letter-spacing: 1px; - line-height: rem(22px); - height: rem(20px); - width: rem(96px); - text-align: center; - margin-top: rem(4px); - } - .card-title { - padding-left: 0; - font-size: 1.5rem; - color: #262626; - } - .star-list { - list-style: none; - padding-left: 0; - li { - display: inline; - } - li.github-stars-count-whole-number { - display: none; - } - } - } -} - -.hub .hub-filter-menu { - ul { - list-style-type: none; - padding-left: rem(20px); - li { - padding-right: rem(20px); - word-break: break-all; - - a { - color: $mid_gray; - &:hover { - color: $orange; - } - } - } - } -} - -.hub .hub-filter { - cursor: pointer; -} - -.hub-index { - #dropdownSortLeft { - color: $mid_gray; - cursor: pointer; - z-index: 1; - position: absolute; - top: inherit; - left: 23%; - max-width: 4rem; - @media(min-width: 480px) and (max-width: 590px) { - left: 40%; - } - } -} - -.hub #dropdownFilter, #dropdownSort, #dropdownSortLeft { - color: $mid_gray; - cursor: pointer; - z-index: 1; - position: absolute; - top: 11rem; - right: 1rem; - left: inherit; - @media(min-width: 480px) and (max-width: 590px) { - top: 7rem; - } - @media(min-width: 590px) { - top: 5rem; - } - @include desktop { - top: 5rem; - } -} - -.hub .sort-menu { - left: inherit; - right: 1rem; - top: 12.5rem; - max-width: 12rem; - @media(min-width: 480px) and (max-width: 590px) { - top: 8.5rem; - } - @media(min-width: 590px) and (max-width: 900px) { - top: 6.5rem; - } - @media(min-width: 900px) and (max-width: 1239px) { - top: 6.5rem; - } - @include max-width-desktop { - right: 0; - top: 6.5rem; - } -} - -.hub-index .sort-menu { - left: 23%; - top: inherit; - max-width: 12rem; -} - -.hub .research-hub-title, -.research-hub-sub-title { - text-transform: uppercase; - letter-spacing: 1.78px; - line-height: rem(32px); -} - -.research-hub-sub-title { - padding-bottom: rem(20px); -} - -.hub .research-hub-title { - color: $orange; -} - -.hub .all-models-button, .full-docs-button { - font-size: 1.125rem; - position: relative; - cursor: pointer; - outline: none; - padding: rem(10px) rem(30px) rem(10px) rem(20px); - background-color: $white; - margin-bottom: 0.125rem; - border: 2px solid $light_grey; - letter-spacing: -0.25px; - line-height: rem(28px); - color: $dark_grey; - background-image: url($baseurl + 
"/assets/images/chevron-right-orange.svg"); - background-size: 6px 13px; - background-position: center right 10px; - background-repeat: no-repeat; - a { - color: $dark_grey; - } - - @include animated_border_hover_state; -} - -.hub .hub-column { - padding-bottom: rem(75px); -} - -.hub.hub-index .hub-column { - padding-bottom: 0; -} - -.hub .how-it-works { - padding-top: rem(50px); - padding-bottom: rem(45px); - .how-it-works-text { - color: $dark_grey; - font-size: rem(20px); - letter-spacing: 0; - line-height: rem(30px); - } - .how-it-works-title-col { - padding-bottom: rem(55px); - } - .full-docs-button { - margin-top: rem(30px); - } -} - -.hub .hub-code-text { - font-size: 80%; - color: $not_quite_black; - background-color: $light_white; - padding: 2px; -} - -.hub .hub-code-block { - display: block; - border-left: 3px solid $orange; - padding: rem(20px) rem(25px) rem(20px) rem(25px); - margin-bottom: rem(60px); -} - -.hub pre.highlight { - background-color: $light_white; - border-left: 2px solid $orange; -} - -.hub code.highlighter-rouge { - background-color: $light_white; -} - -.hub article { - padding-top: rem(20px); - @include desktop { - padding-top: 0; - } - p { - color: $slate; - } -} - -.hub .hub-detail-background { - @include desktop { - height: 515px; - } -} - -.hub .dropdown-menu { - border-radius: 0; - padding-bottom: 0; -} - -.hub .card { - &:hover { - .hub-image:before { - bottom: 100%; - } - } -} - -.hub.hub.hub-detail { - .github-stars-image { - img { - @include desktop { - height: 10px - } - height: 9px - } - } -} - -.hub #development-models-hide, #research-models-hide { - display: none; -} - -.hub .col-md-6.hub-column { - @media (min-width: 768px) { - flex: 0 0 100%; - max-width: 100%; - } - - @include max-width-desktop { - flex: 0 0 50%; - max-width: 50%; - } -} - -.hub .col-md-12.hub-column { - .col-md-6 { - @media (min-width: 768px) { - flex: 0 0 100%; - max-width: 100%; - } - - @include max-width-desktop { - flex: 0 0 100%; - max-width: 50%; - } - } -} - -.hub .featured-image { - padding-bottom: rem(20px); -} - -.hub .coming-soon { - font-weight: 300; - font-style: italic; -} - -.hub.hub-index .jumbotron { - @include desktop { - height: 325px; - } - h1 { - @include desktop { - padding-top: rem(55px); - } - padding-top: 0; - } - p.lead { - padding-top: rem(55px); - } -} - -.hub.hub-index .main-content-wrapper { - @include desktop { - margin-top: 190px + $desktop_header_height; - } - margin-top: 210px; -} - -.hub .page-link { - font-size: rem(20px); - letter-spacing: 0; - line-height: rem(34px); - color: $orange; - width: rem(120px); - text-align: center; -} - -.hub .filter-btn { - color: $mid_gray; - border: 1px solid $mid_gray; - display: inline-block; - text-align: center; - white-space: nowrap; - vertical-align: middle; - padding: 0.375rem 0.75rem; - font-size: 1rem; - line-height: 1.5; - margin-bottom: 5px; - &:hover { - border: 1px solid $orange; - color: $orange; - } -} - -.hub .selected { - border: 1px solid $orange; - background-color: $orange; - color: $white; - &:hover { - color: $white; - } -} - -.hub .all-tag-selected { - background-color: $mid_gray; - color: $white; - &:hover { - border-color: $mid_gray; - color: $white; - } -} - -.hub .pagination { - .page { - border: 1px solid #dee2e6; - padding: 0.5rem 0.75rem; - } - - .active .page { - background-color: #dee2e6; - } -} - -.hub .hub-tags-container { - width: 60%; - &.active { - width: 0; - } -} diff --git a/_sass/jumbotron.scss b/_sass/jumbotron.scss deleted file mode 100644 index 
60817bd8fba3..000000000000 --- a/_sass/jumbotron.scss +++ /dev/null @@ -1,73 +0,0 @@ -.jumbotron { - background-color: transparent; - position: absolute; - left: 0; - right: 0; - margin-right: auto; - margin-left: auto; - padding: 0; - margin-bottom: 0; - display: flex; - align-items: center; - top: $mobile_header_height; - - @include desktop { - height: 550px; - top: $desktop_header_height; - } - - .jumbotron-content { - display: flex; - align-items: center; - } - - .lead { - font-weight: 400; - letter-spacing: 0.25px; - font-size: 20px; - line-height: 1.2; - @include desktop { - font-size: 29px; - } - } - - h1 { - font-size: rem(32px); - text-transform: uppercase; - font-weight: lighter; - letter-spacing: 1.08px; - margin-bottom: rem(10px); - line-height: 1.05; - margin-top: 4rem; - - @include desktop { - font-size: rem(62px); - margin-top: 0; - } - - img { - margin-bottom: 1rem; - } - } - - p { - font-size: rem(18px); - margin-bottom: rem(20px); - @include full-nav-menu-desktop { - width: 50%; - } - } - - &.on-dark-background { - h1, p { - color: $white; - } - } - - .btn { - padding-top: rem(9px); - @include desktop { - margin-top: rem(10px); - } - } -} diff --git a/_sass/main-content.scss b/_sass/main-content.scss deleted file mode 100644 index 42e4f6ae8e0e..000000000000 --- a/_sass/main-content.scss +++ /dev/null @@ -1,36 +0,0 @@ -.main-content-wrapper { - margin-top: 300px; - - @include desktop { - margin-top: 450px + $desktop_header_height; - min-height: 400px; - } -} - -.main-content { - padding-top: rem(24px); - padding-bottom: rem(24px); - - @include desktop { - padding-top: 2.625rem; - } -} - -.main-content-menu { - margin-bottom: rem(20px); - @include desktop { - margin-bottom: rem(80px); - } - - .navbar-nav .nav-link { - color: $slate; - padding-left: rem(30px); - padding-right: rem(30px); - - @include desktop { - &:first-of-type { - padding-left: 0; - } - } - } -} diff --git a/_sass/mobile.scss b/_sass/mobile.scss deleted file mode 100644 index 58b659399681..000000000000 --- a/_sass/mobile.scss +++ /dev/null @@ -1,133 +0,0 @@ -.mobile article { - margin-bottom: rem(80px); -} - -.mobile .main-background { - height: 275px; - - @include desktop { - height: 380px; - } -} - -.mobile .main-content-wrapper { - margin-top: 275px; - @include desktop { - margin-top: 260px + $desktop_header_height; - } -} - -.mobile .jumbotron { - height: 190px; - @include desktop { - height: 260px; - } -} - -.mobile .main-content .navbar { - background-color: $light_grey; - - padding-left: 0; - padding-bottom: 0; - padding-top: 0; - - @media (min-width: 992px) { - li:first-of-type { - padding-left: rem(55px); - } - - .nav-item { - padding: 2rem; - cursor: pointer; - } - - .nav-link { - position: relative; - top: 10%; - transform: translateY(-50%); - } - } - - .nav-select { - background-color: $white; - .nav-link { - color: $orange; - font-weight: 500; - } - } - - .nav-link { - font-size: rem(18px); - color: #8c8c8c; - @include desktop { - margin-left: rem(30px); - } - &:hover { - color: $orange; - } - } - - .nav-item { - padding-top: rem(15px); - padding-bottom: rem(15px); - @include desktop { - padding-bottom: 0; - padding-top: 2rem; - } - @include small-desktop { - padding-bottom: 0; - padding-top: 2rem; - } - @media (max-width: 990px) { - padding-bottom: rem(10px); - padding-top: 1rem; - } - } - - .navbar-toggler { - margin-left: rem(40px); - } -} - -.mobile .main-content { - padding-top: 0; - @include desktop { - padding-top: 1.9rem; - } -} - -.mobile .nav-menu-wrapper { - background-color: 
$light_grey; -} - -.mobile .navbar-nav { - flex-direction: row; -} - -.mobile .mobile-page-sidebar { - padding-top: rem(40px); - padding-bottom: rem(40px); - top: 15%; - - @include desktop { - padding-top: 0; - } - - ul { - padding-left: 0; - } - - li { - list-style-type: none; - line-height: 23px; - margin-bottom: 15px; - - a { - color: #8c8c8c; - &.active, - &:hover { - color: $orange; - } - } - } -} diff --git a/_sass/navigation.scss b/_sass/navigation.scss deleted file mode 100644 index 420978c613c1..000000000000 --- a/_sass/navigation.scss +++ /dev/null @@ -1,443 +0,0 @@ -.header-holder { - height: $mobile_header_height; - - @include full-nav-menu-desktop { - height: $desktop_header_height - 20px; - } - - align-items: center; - display: flex; - left: 0; - margin-left: auto; - margin-right: auto; - position: fixed; - right: 0; - top: 0; - @include full-nav-menu-desktop { - top: 32px; - } - width: 100%; - z-index: 9999; - - &.blog-header, - &.blog-detail-header, - &.resources-header, - &.get-started-header, - &.features-header, - &.comm-stories-header, - &.ecosystem-header, - &.announcement-header, - &.hub-header, - &.mobile-header { - background-color: $white; - border-bottom: 1px solid #e2e2e2; - } -} - -.hello-bar { - display: none; - @include full-nav-menu-desktop { - background-color: #CC2F90; - color: $white; - display: flex; - letter-spacing: .34px; - justify-content: center; - padding: 4px 0; - position: fixed; - top: 0; - text-align: center; - z-index: 9999; - margin-left: auto; - margin-right: auto; - width: 100%; - a { - color: $white; - text-decoration: underline; - } - } -} - -.header-container { - position: relative; - display: flex; - align-items: center; - @include clearfix; - - @include full-nav-menu-desktop { - display: block; - } -} - -.header-logo { - height: 23px; - width: 93px; - background-image: url($baseurl + "/assets/images/logo.svg"); - background-repeat: no-repeat; - background-size: 93px 23px; - display: block; - float: left; - - @include full-nav-menu-desktop { - background-size: 108px 27px; - position: absolute; - height: 27px; - width: 108px; - top: 4px; - float: none; - } -} - -.main-menu-open-button { - background-image: url($baseurl + "/assets/images/icon-menu-dots.svg"); - background-position: center center; - background-size: 25px 7px; - background-repeat: no-repeat; - width: 25px; - height: 7px; - position: absolute; - right: 0; - top: 4px; - @include full-nav-menu-desktop { - display: none; - } -} - -.header-holder .main-menu { - display: none; - - @include full-nav-menu-desktop { - display: flex; - align-items: center; - justify-content: flex-end; - } - - ul { - display: flex; - align-items: center; - margin: 0; - } - - ul li { - display: inline-block; - margin-right: 34px; - position: relative; - - &.active { - &:after { - content: "‱"; - bottom: -24px; - color: $orange; - font-size: rem(22px); - left: 0; - position: absolute; - right: 0; - text-align: center; - } - - a { - color: $orange; - } - - .with-down-arrow { - background-image: url($baseurl + "/assets/images/chevron-down-orange.svg"); - } - } - - &.resources-active:after { - left: -27px; - } - - &:last-of-type { - margin-right: 0; - } - } - - ul li a { - color: $white; - font-size: 1.2rem; - letter-spacing: 0; - line-height: rem(34px); - text-align: center; - text-decoration: none; - padding-bottom: 10px; - @include full-nav-menu-desktop { - &:hover { - color: #ffffff; - border-bottom: 2px solid #ffffff; - } - } - - &.with-down-arrow { - cursor: default; - padding-right: rem(32px); - 
position: relative; - background-image: url($baseurl + "/assets/images/chevron-down-white.svg"); - background-size: 14px 18px; - background-position: top 7px right 10px; - background-repeat: no-repeat; - padding-bottom: 20px; - &:hover { - border-bottom: none; - } - - .dropdown-menu { - border-radius: 0; - padding: 0; - - .dropdown-item { - color: $dark_grey; - border-bottom: 1px solid #e2e2e2; - &:last-of-type { - border-bottom-color: transparent; - } - - &:hover { - background-color: $orange; - } - - p { - font-size: rem(16px); - color: #757575; - } - } - - a.dropdown-item { - &:hover { - color: $white; - p { - color: $white; - } - } - } - } - } - } -} - -.mobile-main-menu { - display: none; - &.open { - background-color: $slate; - display: block; - height: 100%; - left: 0; - margin-left: auto; - margin-right: auto; - min-height: 100%; - position: fixed; - right: 0; - top: 0; - width: 100%; - z-index: 99999; - } -} - -.mobile-main-menu .container-fluid { - background-color: inherit; - align-items: center; - display: flex; - height: $mobile_header_height; - position: relative; - @include clearfix; - z-index: 1; -} - -.mobile-main-menu.open { - ul { - list-style-type: none; - padding: 0; - } - - ul li a, .resources-mobile-menu-title { - font-size: rem(32px); - color: $white; - letter-spacing: 0; - line-height: rem(64px); - } - - ul li.active a { - color: $orange; - } -} - -.main-menu-close-button { - background-image: url($baseurl + "/assets/images/icon-close.svg"); - background-position: center center; - background-repeat: no-repeat; - background-size: 24px 24px; - height: 24px; - position: absolute; - right: 0; - width: 24px; - top: -4px; -} - -.mobile-main-menu-header-container { - position: relative; -} - -.mobile-main-menu-links-container { - display: flex; - padding-left: rem(45px); - height: 100%; - min-height: 100%; - margin-top: 20px; - overflow-y: scroll; - @media only screen and (max-width: 320px) { - .main-menu { - padding-top: 5rem; - } - } - - .navSearchWrapper { - @media only screen and (max-width: 320px) { - width: 75%; - } - } -} - -#topnav-gh-icon { - background-image: url(/assets/social/github-white.svg); - color: white; - width: 33px; - height: 33px; - background-size: 23px 23px; - background-repeat: no-repeat; - background-position: 5px 4px; - border-radius: 25px; - &:hover { - background-color:#88888833; - } -} - -.blog-header, -.blog-detail-header, -.resources-header, -.get-started-header, -.features-header, -.ecosystem-header, -.announcement-header, -.comm-stories-header, -.hub-header, -.mobile-header { - .header-logo { - background-image: url($baseurl + "/assets/images/logo-dark.svg"); - } - - .main-menu ul li a { - color: $not_quite_black; - @include full-nav-menu-desktop { - &:hover { - color: $not_quite_black; - border-bottom: 2px solid $not_quite_black; - } - } - &.with-down-arrow { - background-image: url($baseurl + "/assets/images/chevron-down-black.svg"); - } - } - - .main-menu-open-button { - background-image: url($baseurl + "/assets/images/icon-menu-dots-dark.svg"); - } - - #topnav-gh-icon { - background-image: url(/assets/social/github-black.svg); - } -} - -.ecosystem-dropdown-menu, .resources-dropdown-menu { - left: -25px; - width: 300px; - display: none; - position: absolute; - z-index: 1000; - display: none; - top: 45px; - float: left; - min-width: 10rem; - padding: 0.5rem 0; - font-size: 1rem; - color: #212529; - text-align: left; - list-style: none; - background-color: $white; - background-clip: padding-box; - border: 1px solid rgba(0, 0, 0, 0.15); - 
border-radius: 0.25rem; -} - -.ecosystem-dropdown:hover, .resources-dropdown:hover, .resources-active:hover { - .ecosystem-dropdown-menu, .resources-dropdown-menu { - display: block; - } -} - -.main-menu ul li { - .ecosystem-dropdown-menu, .resources-dropdown-menu { - border-radius: 0; - padding: 0; - } -} - -.main-menu ul li { - .ecosystem-dropdown-menu, .resources-dropdown-menu { - .dropdown-item { - color: #6c6c6d; - border-bottom: 1px solid #e2e2e2; - } - } -} - -.header-holder .main-menu ul li a.nav-dropdown-item { - display: block; - font-size: rem(16px); - line-height: rem(21px); - width: 100%; - padding: 0.25rem 1.5rem; - clear: both; - font-weight: 400; - color: #757575; - text-align: left; - background-color: transparent; - border-bottom: 1px solid #e2e2e2; - p { - margin-bottom: .5rem; - } - &:last-of-type { - border-bottom-color: transparent; - } - &:hover { - background-color: $orange; - color: white; - } - .dropdown-title { - font-size: rem(18px); - color: #212529; - letter-spacing: 0; - line-height: 34px; - } - .docs-title { - display: block; - padding-top: 0.5rem; - } -} - -.header-holder .main-menu ul li a.nav-dropdown-item:hover .dropdown-title { - background-color: $orange; - color: white; -} - -.mobile-main-menu-links-container { - ul.resources-mobile-menu-items { - li { - padding-left: 15px; - a { - font-size: rem(24px); - line-height: rem(48px); - } - } - } -} - - diff --git a/_sass/quick-start-module.scss b/_sass/quick-start-module.scss deleted file mode 100644 index 884df6705cbd..000000000000 --- a/_sass/quick-start-module.scss +++ /dev/null @@ -1,435 +0,0 @@ -.quick-starts { - background: $light_grey; - - .col-md-2-4 { - position: relative; - width: 100%; - min-height: 1px; - padding-right: 15px; - padding-left: 15px; - } - - @media (min-width: 768px) { - .col-md-2-4 { - -webkit-box-flex: 0; - -ms-flex: 0 0 20%; - flex: 0 0 20%; - max-width: 20%; - } - } - - .start-locally-col { - margin-bottom: rem(20px); - .row.ptbuild, - .row.os, - .row.package, - .row.language, - .row.cuda { - margin-bottom: rem(20px); - @include desktop { - margin-bottom: 0; - } - } - - @include small-desktop { - flex: 0 0 100%; - max-width: 100%; - } - - @include desktop { - margin-bottom: rem(40px); - - .row { - margin-bottom: 0; - } - } - - @include max-width-desktop { - margin-bottom: 0; - } - - pre { - font-size: 80% !important; - background-color: #ffffff !important; - } - - .prev-versions-btn { - margin-top: 30px; - } - } - - .cloud-options-col { - @include small-desktop { - flex: 0 0 100%; - max-width: 100%; - margin-left: 0; - margin-top: rem(20px); - } - } - - p { - font-size: rem(18px); - line-height: rem(28px); - } - - .card-body { - flex: 1 1 auto; - } - - .cloud-option-image { - margin-left: rem(15px); - margin-right: rem(25px); - margin-bottom: rem(5px); - } - - .cloud-option-row { - margin-left: 0; - cursor: pointer; - } - - .option { - border: 2px solid $light_grey; - font-size: rem(16px); - color: $quick_start_grey; - letter-spacing: -0.22px; - line-height: rem(20px); - background: $white; - cursor: pointer; - } - - .option:hover { - background-color: $orange; - color: $white; - } - - .selected { - background-color: $orange; - color: $white; - } - - .block { - margin-bottom: rem(1px); - height: rem(40px); - display: flex; - align-items: center; - } - - .title-block { - margin: rem(1px); - height: rem(40px); - border: 2px solid $light_grey; - font-size: rem(16px); - color: $quick_start_grey; - line-height: rem(20px); - display: flex; - align-items: center; - } - - 
.title-block:before { - display: block; - content: "."; - color: transparent; - border-left: 2px solid $smoky_grey; - height: 100%; - position: absolute; - left: 0; - } - - #command { - color: #4a4a4a; - background-color: $white; - padding: rem(15px); - border: 2px solid $light_grey; - word-wrap: break-word; - display: table-cell; - vertical-align: middle; - - a { - font-size: 125%; - - @include desktop { - &:hover { - color: $orange; - } - } - } - - pre { - word-break: break-all; - white-space: normal; - } - } - - .command-container { - display: table; - width: 100%; - @include desktop { - min-height: rem(84px); - } - pre { - margin-bottom: 0px; - padding: 0px; - font-size: 75%; - background-color: #f3f4f7; - } - } - - .command-block { - height: rem(84px); - word-wrap: break-word; - color: $command_block_black; - } - - .command-block:before { - border-left: 2px solid $black; - } - - .quick-start-link { - color: $quick_start_grey; - } - - .mobile-heading { - @include desktop { - display: none; - } - display: flex; - align-items: center; - font-weight: 400; - } - - .command-mobile-heading { - @include desktop { - display: none; - } - display: flex; - align-items: center; - font-weight: 400; - color: $black; - } - - .headings { - display: none; - @include desktop { - display: block; - } - } - - .cloud-options-col { - margin-top: rem(20px); - @include desktop { - margin-top: 0; - } - } - - @media (max-width: 978px) { - .os-text { - margin-top: 0; - } - } -} - -.quick-start-guides { - font-size: rem(18px); - letter-spacing: 0.25px; - line-height: rem(36px); - color: $medium_smoky_grey; - - .select-instructions { - color: $slate; - border-bottom: 2px solid $medium_smoky_grey; - margin-bottom: rem(16px); - display: inline-block; - @include desktop { - margin-bottom: 0; - } - } -} - -.quick-start-module { - .option-module { - float: right; - } - - padding-top: rem(40px); - padding-bottom: rem(40px); - - @include desktop { - padding-top: rem(64px); - padding-bottom: rem(66px); - } - - p { - color: $dark_grey; - font-size: 1.125em; - letter-spacing: 0.25px; - padding-bottom: rem(15px); - margin-bottom: 1.4rem; - } - - h3 { - font-size: rem(24px); - letter-spacing: 1.33px; - line-height: rem(32px); - text-transform: uppercase; - margin-bottom: 2.1rem; - } -} - -.quick-starts .cloud-option-body { - display: flex; - align-items: center; - height: 64px; - padding: 0 0 0 rem(80px); - - @include animated_border_hover_state; - - @include desktop { - padding-right: rem(32px); - } - - @include small-desktop { - padding-right: rem(20px); - } - - position: relative; - background-image: url($baseurl + "/assets/images/chevron-right-orange.svg"); - background-size: 6px 13px; - background-position: center right 15px; - background-repeat: no-repeat; - - @include desktop { - background-size: 8px 14px; - } - - &:before { - opacity: 0.5; - position: absolute; - left: rem(30px); - top: 21px; - } - - &.aws:before { - content: url($baseurl + "/assets/images/aws-logo.svg"); - } - - &.microsoft-azure:before { - content: url($baseurl + "/assets/images/microsoft-azure-logo.svg"); - } - - &.lightning-studios:before { - content: url($baseurl + "/assets/images/lightning-studios-logo.svg"); - } - - &.google-cloud:before { - content: url($baseurl + "/assets/images/google-cloud-logo.svg"); - } - - &.colab:before { - content: url($baseurl + "/assets/images/colab-logo.svg"); - } - - @include desktop { - &:hover:before { - opacity: 1; - } - } -} - -.quick-starts .cloud-option { - background-color: $white; - margin-bottom: rem(2px); 
- border: 2px solid $light_grey; - font-size: rem(18px); - letter-spacing: -0.25px; - line-height: rem(30px); - color: $not_quite_black; - - #microsoft-azure { - p{ - color: $not_quite_black; - margin: 0; - padding: 0; - font-size: inherit; - line-height: 1.3rem; - } - span { - margin-bottom: 0; - padding-bottom: 0; - color: $orange; - padding: 0px 35px 0px 8px; - font-style: italic; - line-height: 1.3rem; - } - } - - @include small-desktop { - font-size: rem(16px); - } - - ul { - display: none; - width: 100%; - margin: 0 0 rem(20px) 0; - padding: 0; - - li { - margin-top: 0; - position:relative; - padding-left: rem(80px); - - @include small-desktop { - font-size: rem(16px); - } - - a { - color: $quick_start_grey; - letter-spacing: -0.25px; - line-height: 30px; - - @include desktop { - &:hover { - color: $orange; - } - } - } - - @include desktop { - &:hover:before { - content: "\2022"; - color: $orange; - position: absolute; - left: 36px; - } - } - - &:first-of-type { - margin-top: rem(20px); - } - } - } - - &.open { - .cloud-option-body { - background-image: url($baseurl + "/assets/images/chevron-down-orange.svg"); - background-size: 14px 14px; - border-bottom: 1px solid $orange; - color: $not_quite_black; - - @include desktop { - border-bottom: none; - } - - &:after { - width: 100%; - } - - &:before { - opacity: 1; - } - } - - ul { - display: block; - } - } -} diff --git a/_sass/resources.scss b/_sass/resources.scss deleted file mode 100644 index 2cd925bab3b6..000000000000 --- a/_sass/resources.scss +++ /dev/null @@ -1,29 +0,0 @@ -.resources .jumbotron { - align-items: flex-end; - color: $white; - height: 220px; - @include desktop { - height: 300px; - } - h1 { - padding-top: rem(135px); - } - p.lead { - margin-bottom: rem(25px); - padding-top: rem(20px); - } -} - -.resources .main-content-wrapper { - margin-top: 385px; - margin-bottom: 0.75rem; - @include desktop { - margin-top: 475px; - } -} - -.resources .resource-card { - @include desktop { - margin-bottom: rem(36px); - } -} diff --git a/_sass/search.scss b/_sass/search.scss deleted file mode 100644 index fa9a119aad42..000000000000 --- a/_sass/search.scss +++ /dev/null @@ -1,365 +0,0 @@ -/* Search */ -input[type='search'] { - -moz-appearance: none; - -webkit-appearance: none; -} - -.navSearchWrapper { - align-items: center; - align-self: center; - display: flex; - justify-content: center; - position: relative; - right: 10px; - top: 15px; - margin-left: 0; - padding-bottom: 20px; - @include desktop { - position: absolute; - margin-left: 30px; - display: block; - padding-left: 3px; - padding-bottom: 0; - } -} - -.tabletSearchWrapper { - top: 0px; - - @include small-desktop { - padding-bottom: 20px; - position: relative; - margin-left: 0; - } -} - -.navSearchWrapper .aa-dropdown-menu { - background: #f9f9f9; - border: 3px solid rgba(57, 57, 57, 0.25); - color: #393939; - font-size: rem(14px); - left: auto !important; - line-height: 1.2em; - right: 0 !important; -} - -.navSearchWrapper - .aa-dropdown-menu - .algolia-docsearch-suggestion--category-header { - background: $black; - color: white; - font-size: rem(14px); - font-weight: 400; -} - -.navSearchWrapper - .aa-dropdown-menu - .algolia-docsearch-suggestion--category-header - .algolia-docsearch-suggestion--highlight { - background-color: $black; - color: #fff; -} - -.navSearchWrapper - .aa-dropdown-menu - .algolia-docsearch-suggestion--title - .algolia-docsearch-suggestion--highlight, -.navSearchWrapper - .aa-dropdown-menu - .algolia-docsearch-suggestion--subcategory-column - 
.algolia-docsearch-suggestion--highlight { - color: $black; -} - -.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion__secondary, -.navSearchWrapper - .aa-dropdown-menu - .algolia-docsearch-suggestion--subcategory-column { - border-color: rgba(57, 57, 57, 0.3); -} - -.navSearchWrapper .algolia-autocomplete .algolia-docsearch-suggestion--subcategory-column { - @include desktop { - word-wrap: normal; - } -} - -input#search-input { - background-color: inherit; - border: none; - border-radius: 20px; - color: $black; - font-size: rem(18px); - font-weight: 300; - line-height: 20px; - outline: none; - padding-left: 25px; - position: relative; - -webkit-transition: 0.5s width ease; - -moz-transition: 0.5s width ease; - -o-transition: 0.5s width ease; - transition: 0.5s width ease; - display: none; - width: 220px; - background-image: url($baseurl + "/assets/images/search-icon.svg"); - background-size: 12px 15px; - background-repeat: no-repeat; - background-position: 8px 5px; - &:hover { - background-image: url($baseurl + "/assets/images/search-icon-orange.svg"); - } -} - -input#mobile-search-input { - font-size: 2rem; - background-color: transparent; - color: $white; - border: none; - outline: none; - padding-left: 25px; - position: relative; - border-top-left-radius: 20px; - border-bottom-left-radius: 20px; - width: 300px; - display: block; -} - -input#search-input:focus, -input#search-input:active { - color: $black; -} -.navigationSlider .slidingNav .navSearchWrapper .algolia-docsearch-footer a { - height: auto; -} -@media only screen and (max-width: 735px) { - .navSearchWrapper { - width: 100%; - } -} - -input::-webkit-input-placeholder { - color: #e5e5e5; -} - -input::-moz-placeholder { - color: #e5e5e5; -} - -input::placeholder { - color: #e5e5e5; -} - -.hljs { - padding: 1.25rem 1.5rem; -} - -@media only screen and (max-width: 1024px) { - .reactNavSearchWrapper input#search-input { - background-color: rgba(242, 196, 178, 0.25); - border: none; - border-radius: 20px; - box-sizing: border-box; - color: #393939; - font-size: rem(14px); - line-height: 20px; - outline: none; - padding-left: 25px; - position: relative; - transition: background-color 0.2s cubic-bezier(0.68, -0.55, 0.265, 1.55), - width 0.2s cubic-bezier(0.68, -0.55, 0.265, 1.55), color 0.2s ease; - width: 100%; - } - - .reactNavSearchWrapper input#search-input:focus, - .reactNavSearchWrapper input#search-input:active { - background-color: $black; - color: #fff; - } - - .reactNavSearchWrapper .algolia-docsearch-suggestion--subcategory-inline { - display: none; - } - - .reactNavSearchWrapper > span { - width: 100%; - } - - .reactNavSearchWrapper .aa-dropdown-menu { - font-size: rem(12px); - line-height: 2em; - padding: 0; - border-width: 1px; - min-width: 500px; - } - .reactNavSearchWrapper .algolia-docsearch-suggestion__secondary { - border-top: none; - } - .aa-suggestions { - min-height: 140px; - max-height: 60vh; - -webkit-overflow-scrolling: touch; - overflow-y: scroll; - } -} - -@media only screen and (min-width: 1024px) { - .navSearchWrapper { - padding-left: 10px; - position: relative; - right: auto; - top: auto; - @include desktop { - padding-left: 3px; - right: 10px; - margin-left: 0; - } - } - - .navSearchWrapper .algolia-autocomplete { - display: block; - } - - .tabletSearchWrapper { - right: 10px; - } -} - -@media only screen and (max-width: 735px) { - .reactNavSearchWrapper .aa-dropdown-menu { - min-width: 400px; - } -} -@media only screen and (max-width: 475px) { - .reactNavSearchWrapper .aa-dropdown-menu { 
- min-width: 300px; - } -} - -.search-border { - display: none; - flex-direction: row; - border: none; - background-color: transparent; - border-radius: 20px; - width: 100%; - float: right; - @include desktop { - display: flex; - } -} - -.mobile-search-border { - flex-direction: row; - border: none; - background-color: rgba(256, 256, 256, 0.1); - border-radius: 20px; - width: 100%; - float: right; - display: flex; - @include small-desktop { - border-radius: 25px; - } -} - -#close-search { - color: $orange; - padding-right: 10px; - font-size: .99em; - display: none; - cursor: pointer; -} - -.active-header { - margin-top: -1px; -} - -.active-search-icon { - background-image: url($baseurl + "/assets/images/search-icon-orange.svg") !important; - display: inline-block !important; -} - -.active-background { - background-color: $light_grey; - width: 50%; - padding: 4px; -} - -.homepage-header { - input#search-input { - background-image: url($baseurl + "/assets/images/search-icon-white.svg"); - color: $white; - } - input#search-input:focus, - input#search-input:active { - color: $white; - } - .active-background { - background-color:#88888833; - } - #close-search { - color: $white; - opacity: 0.5; - &:hover { - color: $orange; - } - } - #search-icon { - background-image: url(/assets/images/search-icon-white.svg); - &:hover { - background-color:#88888833; - } - } -} - -#search-icon { - background-image: url(/assets/images/search-icon.svg); - color: transparent; - width: 33px; - height: 33px; - background-size: 21px 21px; - background-repeat: no-repeat; - background-position: 6px 5px; - border-radius: 25px; - cursor: pointer; - &:hover { - background-color: $light_grey; - } -} - -#mobile-search-icon { - background-image: url(/assets/images/search-icon-white.svg); - width: 30px; - height: 38px; - background-size: 16px 28px; - background-repeat: no-repeat; - background-position: 0px 5px; - cursor: pointer; - border-top-right-radius: 20px; - border-bottom-right-radius: 20px; - @include small-desktop { - height: 50px; - width: 35px; - background-size: 20px 42px; - } -} - -.navSearchWrapper { - .algolia-autocomplete .ds-dropdown-menu { - min-width: 330px; - height: 500px; - overflow-y: scroll; - @include desktop { - height: auto; - min-width: 700px; - overflow-y: hidden; - } - @include small-desktop { - height: 700px; - overflow-y: scroll; - } - @media (min-width: 769px) and (max-width: 1024px) { - min-width: 950px; - } - } -} -/* End of Search */ diff --git a/_sass/similar-posts-module.scss b/_sass/similar-posts-module.scss deleted file mode 100644 index 71d804f22bee..000000000000 --- a/_sass/similar-posts-module.scss +++ /dev/null @@ -1,55 +0,0 @@ -.similar-posts-module { - background: $light_grey; - - p.blog-date { - font-size: rem(18px); - color: $very_dark_grey; - letter-spacing: 0; - line-height: rem(24px); - } - - h4 { - a { - font-family: FreightSans; - font-size: rem(24px); - color: $black; - letter-spacing: 0; - line-height: rem(32px); - font-weight: 400; - } - } - - .module-content { - .navbar-nav { - margin-top: rem(60px); - } - - .module-heading { - text-transform: uppercase; - color: $black; - font-size: rem(24px); - letter-spacing: rem(1.33px); - line-height: rem(32px); - font-weight: 400; - } - - .nav-item:last-of-type { - @include desktop { - position: absolute; - right: 0; - a { - margin-right: 0; - } - } - } - margin-bottom: rem(35px); - } - - .see-more-posts { - color: $black; - font-size: rem(18px); - letter-spacing: -0.25px; - line-height: rem(30px); - top: rem(2px); - } -} diff 
--git a/_sass/syntax-highlighting.scss b/_sass/syntax-highlighting.scss deleted file mode 100644 index 36b42a5bb8f6..000000000000 --- a/_sass/syntax-highlighting.scss +++ /dev/null @@ -1,211 +0,0 @@ -/*Github syntax highlighting theme via Rouge*/ - -.highlight table td { padding: 5px; } -.highlight table pre { margin: 0; } -.highlight .cm { - color: #999988; - font-style: italic; -} -.highlight .cp { - color: #999999; - font-weight: bold; -} -.highlight .c1 { - color: #999988; - font-style: italic; -} -.highlight .cs { - color: #999999; - font-weight: bold; - font-style: italic; -} -.highlight .c, .highlight .cd { - color: #8c8c8c; - font-style: italic; -} -.highlight .err { - color: #a61717; - background-color: #e3d2d2; -} -.highlight .gd { - color: #000000; - background-color: #ffdddd; -} -.highlight .ge { - color: #000000; - font-style: italic; -} -.highlight .gr { - color: #aa0000; -} -.highlight .gh { - color: #999999; -} -.highlight .gi { - color: #000000; - background-color: #ddffdd; -} -.highlight .go { - color: #888888; -} -.highlight .gp { - color: #555555; -} -.highlight .gs { - font-weight: bold; -} -.highlight .gu { - color: #aaaaaa; -} -.highlight .gt { - color: #aa0000; -} -.highlight .kc { - color: #000000; - font-weight: bold; -} -.highlight .kd { - color: #000000; - font-weight: bold; -} -.highlight .kn { - color: #000000; - font-weight: bold; -} -.highlight .kp { - color: #000000; - font-weight: bold; -} -.highlight .kr { - color: #000000; - font-weight: bold; -} -.highlight .kt { - color: #445588; - font-weight: bold; -} -.highlight .k, .highlight .kv { - color: #000000; - font-weight: bold; -} -.highlight .mf { - color: #009999; -} -.highlight .mh { - color: #009999; -} -.highlight .il { - color: #009999; -} -.highlight .mi { - color: #009999; -} -.highlight .mo { - color: #009999; -} -.highlight .m, .highlight .mb, .highlight .mx { - color: #009999; -} -.highlight .sb { - color: #d14; -} -.highlight .sc { - color: #d14; -} -.highlight .sd { - color: #d14; -} -.highlight .s2 { - color: #d14; -} -.highlight .se { - color: #d14; -} -.highlight .sh { - color: #d14; -} -.highlight .si { - color: #d14; -} -.highlight .sx { - color: #d14; -} -.highlight .sr { - color: #009926; -} -.highlight .s1 { - color: #d14; -} -.highlight .ss { - color: #990073; -} -.highlight .s { - color: #d14; -} -.highlight .na { - color: #008080; -} -.highlight .bp { - color: #999999; -} -.highlight .nb { - color: #0086B3; -} -.highlight .nc { - color: #445588; - font-weight: bold; -} -.highlight .no { - color: #008080; -} -.highlight .nd { - color: #3c5d5d; - font-weight: bold; -} -.highlight .ni { - color: #800080; -} -.highlight .ne { - color: #990000; - font-weight: bold; -} -.highlight .nf { - color: #990000; - font-weight: bold; -} -.highlight .nl { - color: #990000; - font-weight: bold; -} -.highlight .nn { - color: #555555; -} -.highlight .nt { - color: #000080; -} -.highlight .vc { - color: #008080; -} -.highlight .vg { - color: #008080; -} -.highlight .vi { - color: #008080; -} -.highlight .nv { - color: #008080; -} -.highlight .ow { - color: #000000; - font-weight: bold; -} -.highlight .o { - color: #000000; - font-weight: bold; -} -.highlight .w { - color: #bbbbbb; -} -.highlight { - background-color: #f8f8f8; -} diff --git a/_sass/videos.scss b/_sass/videos.scss deleted file mode 100644 index 9264c04a95d6..000000000000 --- a/_sass/videos.scss +++ /dev/null @@ -1,21 +0,0 @@ -.video-item { - margin-bottom: 5rem; - - a h5 { - color: $black; - margin-top: 1rem; - } - a:hover { - h5 { - 
color: $orange; - } - } - - .image-container { - overflow: hidden; - img { - margin: -10% 0; - width: 100%; - } - } -} \ No newline at end of file diff --git a/_style_guide/article.md b/_style_guide/article.md deleted file mode 100644 index 603e7814d5ba..000000000000 --- a/_style_guide/article.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -layout: default -title: Base Style Guide ---- - -## Header 2 -This is body copy. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea. - -### Header 3 - -This is body copy. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea. - -#### Header 4 - -This is body copy. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea. - -##### Header 5 - -This is body copy. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea. - ---- - -This is more body copy with `code snippets`. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. [Here is an inline link](#). Ut enim ad minim veniam, quis nostrud `torch.*.FloatTensor` ullamco laboris nisi ut aliquip ex ea commodo consequat. - -_This is italicized body copy. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat_ - -**This is bolded body copy. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.** - ---- - -This is body copy before an unordered list. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea. - -- Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. -- Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. -- Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. - -This is body copy after an unordered list. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea. - -1. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. -2. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. -3. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. - -This is body copy after an ordered list. 
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea. - -
<dl>
-  <dt>Definition list</dt>
-  <dd>Lorem ipsum dolor sit amet, consectetur adipiscing elit</dd>
-
-  <dt>Definition list</dt>
-  <dd>Lorem ipsum dolor sit amet, consectetur adipiscing elit</dd>
-
-  <dt>Definition list</dt>
-  <dd>Lorem ipsum dolor sit amet, consectetur adipiscing elit</dd>
-</dl>
        - ---- - -![Here's an image](https://via.placeholder.com/1000x200/e44c2c/ffffff "Sample image") - ---- - -> "This is a blockquote. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat" - -```sh - brew install pytorch # Here is a small code block - brew install pytorch # Here is a small code block -``` - -```python -# Here is a large code block with syntax highlighting - -# !/usr/bin/python3 - -# Dictionaries map keys to values. - -fred = { 'mike': 456, 'bill': 399, 'sarah': 521 } - -# Subscripts. -try: - print(fred) - print(fred['bill']) - print(fred['nora']) - print("Won't see this!") -except KeyError as rest: - print("Lookup failed:", rest) -print() - -# Entries can be added, udated, or deleted. -fred['bill'] = 'Sopwith Camel' -fred['wilma'] = 2233 -del fred['mike'] -print(fred) -print() - -# Get all the keys. -print(fred.keys()) -for k in fred.keys(): - print(k, "=>", fred[k]) -print() - -# Test for presence of a key. -for t in [ 'zingo', 'sarah', 'bill', 'wilma' ]: - print(t,end=' ') - if t in fred: - print('=>', fred[t]) - else: - print('is not present.') -``` - -Here is a table: - -| Data | type torch.dtype | Tensor types | -|------|------------------|--------------| -| 32-bit floating point | `torch.float32` or `torch.float` | `torch.*.FloatTensor` -| 64-bit floating point | `torch.float64` or `torch.double` | `torch.*.DoubleTensor` -| 16-bit floating point | `torch.float16` or `torch.half` | `torch.*.HalfTensor` -| 8-bit integer (unsigned) | `torch.uint8` | `torch.*.ByteTensor` -| 8-bit integer (signed) | `torch.int8` | `torch.*.CharTensor` -| 16-bit integer (signed) | `torch.int16` or `torch.short` | `torch.*.ShortTensor` -| 32-bit integer (signed) | `torch.int32` or `torch.int` | `torch.*.IntTensor` -| 64-bit integer (signed) | `torch.int64` or `torch.long` | `torch.*.LongTensor` - diff --git a/_videos/pt20qa1.md b/_videos/pt20qa1.md deleted file mode 100644 index e6b641b68485..000000000000 --- a/_videos/pt20qa1.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.0 Q&A Series: How and why you should contribute to tutorials and code to PyTorch' -youtube_id: v4nDZTK_eJg -date: Dec 16, 2022 ---- diff --git a/_videos/pt20qa10.md b/_videos/pt20qa10.md deleted file mode 100644 index a3722e3e89b5..000000000000 --- a/_videos/pt20qa10.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.0 Q&A: Dynamic Shapes and Calculating Maximum Batch Size' -youtube_id: 4dX4kuVbl9U -date: Feb 8, 2023 ---- diff --git a/_videos/pt20qa11.md b/_videos/pt20qa11.md deleted file mode 100644 index c1ff75f708b9..000000000000 --- a/_videos/pt20qa11.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.0 Q&A: TorchRL' -youtube_id: myEfUoYrbts -date: Feb 16, 2023 ---- diff --git a/_videos/pt20qa12.md b/_videos/pt20qa12.md deleted file mode 100644 index 06a8f75a88a0..000000000000 --- a/_videos/pt20qa12.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.0 Q&A: TorchMultiModal' -youtube_id: L7W2-0pwsFI -date: Feb 24, 2023 ---- diff --git a/_videos/pt20qa2.md b/_videos/pt20qa2.md deleted file mode 100644 index d05f4e1d12df..000000000000 --- a/_videos/pt20qa2.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.0 Live Q&A Series: PT2 Profiling and Debugging' -youtube_id: 1FSBurHpH_Q -date: Dec 16, 2022 ---- diff --git a/_videos/pt20qa3.md b/_videos/pt20qa3.md deleted file mode 100644 index 
844d5bec7cef..000000000000 --- a/_videos/pt20qa3.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.0 Live Q&A Series: A Deep Dive on TorchDynamo' -youtube_id: 5FNHwPIyHr8 -date: Dec 20, 2022 ---- diff --git a/_videos/pt20qa4.md b/_videos/pt20qa4.md deleted file mode 100644 index 1a8ae72d9c9b..000000000000 --- a/_videos/pt20qa4.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.0 Live Q&A Series: PyTorch 2.0 Export' -youtube_id: U6J5hl6nXlU -date: Dec 22, 2022 ---- diff --git a/_videos/pt20qa5.md b/_videos/pt20qa5.md deleted file mode 100644 index 181fdb47a228..000000000000 --- a/_videos/pt20qa5.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.0 Live Q&A Series: TorchRec and FSDP in Production' -youtube_id: NgW6gp69ssc -date: Dec 22, 2022 ---- diff --git a/_videos/pt20qa6.md b/_videos/pt20qa6.md deleted file mode 100644 index 38f64fdcc8d5..000000000000 --- a/_videos/pt20qa6.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.0 Ask the Engineers Q&A Series: Deep Dive into TorchInductor and PT2 Backend Integration' -youtube_id: AaFc3C7CZAs -date: Jan 26, 2023 ---- diff --git a/_videos/pt20qa7.md b/_videos/pt20qa7.md deleted file mode 100644 index 11f736aca14d..000000000000 --- a/_videos/pt20qa7.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.0 Ask the Engineers Q&A Series: PT2 and Distributed (DDP/FSDP)' -youtube_id: 6S4tH9qEswo -date: Jan 25, 2023 ---- diff --git a/_videos/pt20qa8.md b/_videos/pt20qa8.md deleted file mode 100644 index 4fc96052374c..000000000000 --- a/_videos/pt20qa8.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.0 Q&A: Rethinking Data Loading with TorchData' -youtube_id: 65DvI3YrFW8 -date: Feb 2, 2023 ---- diff --git a/_videos/pt20qa9.md b/_videos/pt20qa9.md deleted file mode 100644 index 36125361fade..000000000000 --- a/_videos/pt20qa9.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.0 Q&A: Optimizing Transformers for Inference' -youtube_id: ZOWjOxC80qw -date: Feb 3, 2023 ---- diff --git a/_videos/ptconf1.md b/_videos/ptconf1.md deleted file mode 100644 index 9cceb6da68d7..000000000000 --- a/_videos/ptconf1.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Lightning Talk: State of PyTorch - Alban Desmaison, Meta - Speakers: Alban Desmaison' -youtube_id: dR0lHxt3Tjo -date: Oct 25, 2023 ---- diff --git a/_videos/ptconf11.md b/_videos/ptconf11.md deleted file mode 100644 index 9d859c6eb460..000000000000 --- a/_videos/ptconf11.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Lightning Talk: Large-Scale Distributed Training with Dynamo and PyTorch/XLA SPMD - Yeounoh Chung & Jiewen Tan, Google' -youtube_id: tWH2MAHzVVc -date: Oct 25, 2023 ---- diff --git a/_videos/ptconf12.md b/_videos/ptconf12.md deleted file mode 100644 index 16ccea6a767b..000000000000 --- a/_videos/ptconf12.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Lightning Talk: PyTorch 2.0 on the ROCm Platform - Douglas Lehr, AMD' -youtube_id: lN-LrBqpeaA -date: Oct 25, 2023 ---- diff --git a/_videos/ptconf13.md b/_videos/ptconf13.md deleted file mode 100644 index 27acd9aafd72..000000000000 --- a/_videos/ptconf13.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Lightning Talk: Accelerated Inference in PyTorch 2.X with Torch-TensorRT - George Stefanakis & Dheeraj Peri, NVIDIA' -youtube_id: eGDMJ3MY4zk -date: Oct 25, 2023 ---- diff --git a/_videos/ptconf15.md b/_videos/ptconf15.md deleted file mode 100644 index 544f10dfd178..000000000000 --- a/_videos/ptconf15.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Lightning Talk: Streamlining Model Export with the New ONNX Exporter - 
Maanav Dalal & Aaron Bockover' -youtube_id: cDDWD8KhUbQ -date: Oct 25, 2023 ---- diff --git a/_videos/ptconf16.md b/_videos/ptconf16.md deleted file mode 100644 index e49da6755f1c..000000000000 --- a/_videos/ptconf16.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Lightning Talk: Efficient Inference at the Edge: Performance You Need at the Lowest Power You Deserve - Felix Baum, Qualcomm' -youtube_id: AEY64cbP4h8 -date: Oct 25, 2023 ---- diff --git a/_videos/ptconf2.md b/_videos/ptconf2.md deleted file mode 100644 index b052e39235e1..000000000000 --- a/_videos/ptconf2.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Lightning Talk: TorchFix - a Linter for PyTorch-Using Code with Autofix Support - Sergii Dymchenko' -youtube_id: qLU2JD_PtiY -date: Oct 25, 2023 ---- diff --git a/_videos/ptconf3.md b/_videos/ptconf3.md deleted file mode 100644 index 7f95cb1c6310..000000000000 --- a/_videos/ptconf3.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: "What's New for PyTorch Developer Infrastructure - Eli Uriegas & Omkar Salpekar" -youtube_id: I95KmF6KSIA -date: Oct 25, 2023 ---- diff --git a/_videos/ptconf4.md b/_videos/ptconf4.md deleted file mode 100644 index df46c48c697e..000000000000 --- a/_videos/ptconf4.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Lightning Talk: Enhancements Made to MPS Backend in PyTorch for Applications Running on Mac Platforms - Kulin Seth, Apple' -youtube_id: Np8YEW011dg -date: Oct 25, 2023 ---- diff --git a/_videos/ptconf5.md b/_videos/ptconf5.md deleted file mode 100644 index 64893ec93344..000000000000 --- a/_videos/ptconf5.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch Korea User Group: The Beginning, Present, and Future - Junghwan Park' -youtube_id: 80MGwzKQOc4 -date: Oct 25, 2023 ---- diff --git a/_videos/ptconf6.md b/_videos/ptconf6.md deleted file mode 100644 index 7041e922780b..000000000000 --- a/_videos/ptconf6.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Lightning Talk: Triton Compiler - Thomas Raoux, OpenAI' -youtube_id: AtbnRIzpwho -date: Oct 25, 2023 ---- diff --git a/_videos/ptconf7.md b/_videos/ptconf7.md deleted file mode 100644 index 68931368a89e..000000000000 --- a/_videos/ptconf7.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Lightning Talk: Harnessing NVIDIA Tensor Cores: An Exploration of CUTLASS & OpenAI Triton - Matthew Nicely US, NVIDIA' -youtube_id: yCyZEJrlrfY -date: Oct 25, 2023 ---- diff --git a/_videos/ptconf8.md b/_videos/ptconf8.md deleted file mode 100644 index 16ccea6a767b..000000000000 --- a/_videos/ptconf8.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Lightning Talk: PyTorch 2.0 on the ROCm Platform - Douglas Lehr, AMD' -youtube_id: lN-LrBqpeaA -date: Oct 25, 2023 ---- diff --git a/_videos/vid1.md b/_videos/vid1.md deleted file mode 100644 index d42b1576182d..000000000000 --- a/_videos/vid1.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.5 Live Q&A' -youtube_id: B3IgXpl4xt4 -date: Oct 21, 2024 ---- diff --git a/_videos/vid10.md b/_videos/vid10.md deleted file mode 100644 index faf1c637b5ae..000000000000 --- a/_videos/vid10.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Using PyTorch and DINOv2 for Multi-label Plant Species Classification' -youtube_id: rxVg3yrc51s -date: Mar 28, 2025 ---- diff --git a/_videos/vid11.md b/_videos/vid11.md deleted file mode 100644 index b7720dd02abb..000000000000 --- a/_videos/vid11.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch Expert Exchange – Multi-Modal Tabular Deep Learning with PyTorch Frame' -youtube_id: zPjLHf0X78w -date: Feb 20, 2025 ---- diff --git a/_videos/vid12.md 
b/_videos/vid12.md deleted file mode 100644 index f3ba5fc289fa..000000000000 --- a/_videos/vid12.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.6 Release Live Q&A' -youtube_id: 1OopuwTq6oE -date: Feb 8, 2025 ---- diff --git a/_videos/vid13.md b/_videos/vid13.md deleted file mode 100644 index 747642d8aea4..000000000000 --- a/_videos/vid13.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'How does batching work on modern GPUs?' -youtube_id: HTcnp9NEHGY -date: Nov 14, 2024 ---- diff --git a/_videos/vid2.md b/_videos/vid2.md deleted file mode 100644 index b7900abc0ec0..000000000000 --- a/_videos/vid2.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'DistServe: disaggregating prefill and decoding for goodput-optimized LLM inference' -youtube_id: Bh-jlh5vlF0 -date: Oct 16, 2024 ---- diff --git a/_videos/vid3.md b/_videos/vid3.md deleted file mode 100644 index 92bae2c1fa3e..000000000000 --- a/_videos/vid3.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Efficient Streaming Language Models with Attention Sinks' -youtube_id: RnM84Sv9WpA -date: Oct 11, 2024 ---- diff --git a/_videos/vid4.md b/_videos/vid4.md deleted file mode 100644 index c29ec8d1e005..000000000000 --- a/_videos/vid4.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch Expert Exchange: Adapting open source models with Open-Instruct and Tulu' -youtube_id: e1qUJFAo10s -date: Sep 11, 2024 ---- diff --git a/_videos/vid5.md b/_videos/vid5.md deleted file mode 100644 index 110c90047648..000000000000 --- a/_videos/vid5.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch Expert Exchange: Efficient Generative Models: From Sparse to Distributed Inference' -youtube_id: Eqg0VIiWrgM -date: Aug 30, 2024 ---- diff --git a/_videos/vid6.md b/_videos/vid6.md deleted file mode 100644 index e456938c3d9e..000000000000 --- a/_videos/vid6.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'torch.compile: The Missing Manual' -youtube_id: rew5CSUaIXg -date: Aug 13, 2024 ---- diff --git a/_videos/vid7.md b/_videos/vid7.md deleted file mode 100644 index 02bea05040a7..000000000000 --- a/_videos/vid7.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Using PyTorch for Monocular Depth Estimation Webinar' -youtube_id: xf2QgioY370 -date: Sep 27, 2024 ---- diff --git a/_videos/vid8.md b/_videos/vid8.md deleted file mode 100644 index 550290a68f3c..000000000000 --- a/_videos/vid8.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'Accelerating LLM family of models on Arm Neoverse based Graviton AWS processors with KleidiAI' -youtube_id: NeHIhQWewug -date: Aug 22, 2024 ---- diff --git a/_videos/vid9.md b/_videos/vid9.md deleted file mode 100644 index 769901483585..000000000000 --- a/_videos/vid9.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: 'PyTorch 2.4: Live Q&A' -youtube_id: ry_QgUIYX1E -date: Jul 25, 2024 ---- diff --git a/assets/css/style.css b/assets/css/style.css new file mode 100644 index 000000000000..1f9ba713ded3 --- /dev/null +++ b/assets/css/style.css @@ -0,0 +1 @@ +/*! 
normalize.css v4.1.1 | MIT License | github.com/necolas/normalize.css */html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,main,menu,nav,section{display:block}summary{display:list-item}audio,canvas,progress,video{display:inline-block}audio:not([controls]){display:none;height:0}progress{vertical-align:baseline}template,[hidden]{display:none !important}a{background-color:transparent}a:active,a:hover{outline-width:0}abbr[title]{border-bottom:none;text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted}b,strong{font-weight:inherit}b,strong{font-weight:bolder}dfn{font-style:italic}h1{font-size:2em;margin:0.67em 0}mark{background-color:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-0.25em}sup{top:-0.5em}img{border-style:none}svg:not(:root){overflow:hidden}code,kbd,pre,samp{font-family:monospace, monospace;font-size:1em}figure{margin:1em 40px}hr{box-sizing:content-box;height:0;overflow:visible}button,input,select,textarea{font:inherit;margin:0}optgroup{font-weight:bold}button,input{overflow:visible}button,select{text-transform:none}button,html [type="button"],[type="reset"],[type="submit"]{-webkit-appearance:button}button::-moz-focus-inner,[type="button"]::-moz-focus-inner,[type="reset"]::-moz-focus-inner,[type="submit"]::-moz-focus-inner{border-style:none;padding:0}button:-moz-focusring,[type="button"]:-moz-focusring,[type="reset"]:-moz-focusring,[type="submit"]:-moz-focusring{outline:1px dotted ButtonText}fieldset{border:1px solid #c0c0c0;margin:0 2px;padding:0.35em 0.625em 0.75em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}textarea{overflow:auto}[type="checkbox"],[type="radio"]{box-sizing:border-box;padding:0}[type="number"]::-webkit-inner-spin-button,[type="number"]::-webkit-outer-spin-button{height:auto}[type="search"]{-webkit-appearance:textfield;outline-offset:-2px}[type="search"]::-webkit-search-cancel-button,[type="search"]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-input-placeholder{color:inherit;opacity:0.54}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}*{box-sizing:border-box}input,select,textarea,button{font-family:inherit;font-size:inherit;line-height:inherit}body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol";font-size:14px;line-height:1.5;color:#24292e;background-color:#fff}a{color:#0366d6;text-decoration:none}a:hover{text-decoration:underline}b,strong{font-weight:600}hr,.rule{height:0;margin:15px 0;overflow:hidden;background:transparent;border:0;border-bottom:1px solid #dfe2e5}hr::before,.rule::before{display:table;content:""}hr::after,.rule::after{display:table;clear:both;content:""}table{border-spacing:0;border-collapse:collapse}td,th{padding:0}button{cursor:pointer;border-radius:0}[hidden][hidden]{display:none !important}details summary{cursor:pointer}details:not([open])>*:not(summary){display:none 
!important}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:0}h1{font-size:32px;font-weight:600}h2{font-size:24px;font-weight:600}h3{font-size:20px;font-weight:600}h4{font-size:16px;font-weight:600}h5{font-size:14px;font-weight:600}h6{font-size:12px;font-weight:600}p{margin-top:0;margin-bottom:10px}small{font-size:90%}blockquote{margin:0}ul,ol{padding-left:0;margin-top:0;margin-bottom:0}ol ol,ul ol{list-style-type:lower-roman}ul ul ol,ul ol ol,ol ul ol,ol ol ol{list-style-type:lower-alpha}dd{margin-left:0}tt,code{font-family:"SFMono-Regular",Consolas,"Liberation Mono",Menlo,Courier,monospace;font-size:12px}pre{margin-top:0;margin-bottom:0;font-family:"SFMono-Regular",Consolas,"Liberation Mono",Menlo,Courier,monospace;font-size:12px}.octicon{vertical-align:text-bottom}.anim-fade-in{-webkit-animation-name:fade-in;animation-name:fade-in;-webkit-animation-duration:1s;animation-duration:1s;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out}.anim-fade-in.fast{-webkit-animation-duration:300ms;animation-duration:300ms}@-webkit-keyframes fade-in{0%{opacity:0}100%{opacity:1}}@keyframes fade-in{0%{opacity:0}100%{opacity:1}}.anim-fade-out{-webkit-animation-name:fade-out;animation-name:fade-out;-webkit-animation-duration:1s;animation-duration:1s;-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out}.anim-fade-out.fast{-webkit-animation-duration:0.3s;animation-duration:0.3s}@-webkit-keyframes fade-out{0%{opacity:1}100%{opacity:0}}@keyframes fade-out{0%{opacity:1}100%{opacity:0}}.anim-fade-up{opacity:0;-webkit-animation-name:fade-up;animation-name:fade-up;-webkit-animation-duration:0.3s;animation-duration:0.3s;-webkit-animation-fill-mode:forwards;animation-fill-mode:forwards;-webkit-animation-timing-function:ease-out;animation-timing-function:ease-out;-webkit-animation-delay:1s;animation-delay:1s}@-webkit-keyframes fade-up{0%{opacity:0.8;transform:translateY(100%)}100%{opacity:1;transform:translateY(0)}}@keyframes fade-up{0%{opacity:0.8;transform:translateY(100%)}100%{opacity:1;transform:translateY(0)}}.anim-fade-down{-webkit-animation-name:fade-down;animation-name:fade-down;-webkit-animation-duration:0.3s;animation-duration:0.3s;-webkit-animation-fill-mode:forwards;animation-fill-mode:forwards;-webkit-animation-timing-function:ease-in;animation-timing-function:ease-in}@-webkit-keyframes fade-down{0%{opacity:1;transform:translateY(0)}100%{opacity:0.5;transform:translateY(100%)}}@keyframes fade-down{0%{opacity:1;transform:translateY(0)}100%{opacity:0.5;transform:translateY(100%)}}.anim-grow-x{width:0%;-webkit-animation-name:grow-x;animation-name:grow-x;-webkit-animation-duration:0.3s;animation-duration:0.3s;-webkit-animation-fill-mode:forwards;animation-fill-mode:forwards;-webkit-animation-timing-function:ease;animation-timing-function:ease;-webkit-animation-delay:0.5s;animation-delay:0.5s}@-webkit-keyframes grow-x{to{width:100%}}@keyframes grow-x{to{width:100%}}.anim-shrink-x{-webkit-animation-name:shrink-x;animation-name:shrink-x;-webkit-animation-duration:0.3s;animation-duration:0.3s;-webkit-animation-fill-mode:forwards;animation-fill-mode:forwards;-webkit-animation-timing-function:ease-in-out;animation-timing-function:ease-in-out;-webkit-animation-delay:0.5s;animation-delay:0.5s}@-webkit-keyframes shrink-x{to{width:0%}}@keyframes shrink-x{to{width:0%}}.anim-scale-in{-webkit-animation-name:scale-in;animation-name:scale-in;-webkit-animation-duration:0.15s;animation-duration:0.15s;-webkit-animation-timing-function:cubic-bezier(0.2, 0, 0.13, 
1.5);animation-timing-function:cubic-bezier(0.2, 0, 0.13, 1.5)}@-webkit-keyframes scale-in{0%{opacity:0;transform:scale(0.5)}100%{opacity:1;transform:scale(1)}}@keyframes scale-in{0%{opacity:0;transform:scale(0.5)}100%{opacity:1;transform:scale(1)}}.anim-pulse{-webkit-animation-name:pulse;animation-name:pulse;-webkit-animation-duration:2s;animation-duration:2s;-webkit-animation-timing-function:linear;animation-timing-function:linear;-webkit-animation-iteration-count:infinite;animation-iteration-count:infinite}@-webkit-keyframes pulse{0%{opacity:0.3}10%{opacity:1}100%{opacity:0.3}}@keyframes pulse{0%{opacity:0.3}10%{opacity:1}100%{opacity:0.3}}.anim-pulse-in{-webkit-animation-name:pulse-in;animation-name:pulse-in;-webkit-animation-duration:0.5s;animation-duration:0.5s}@-webkit-keyframes pulse-in{0%{transform:scale3d(1, 1, 1)}50%{transform:scale3d(1.1, 1.1, 1.1)}100%{transform:scale3d(1, 1, 1)}}@keyframes pulse-in{0%{transform:scale3d(1, 1, 1)}50%{transform:scale3d(1.1, 1.1, 1.1)}100%{transform:scale3d(1, 1, 1)}}.hover-grow{transition:transform 0.3s;-webkit-backface-visibility:hidden;backface-visibility:hidden}.hover-grow:hover{transform:scale(1.025)}.border{border:1px #e1e4e8 solid !important}.border-y{border-top:1px #e1e4e8 solid !important;border-bottom:1px #e1e4e8 solid !important}.border-0{border:0 !important}.border-dashed{border-style:dashed !important}.border-blue{border-color:#0366d6 !important}.border-blue-light{border-color:#c8e1ff !important}.border-green{border-color:#34d058 !important}.border-green-light{border-color:#a2cbac !important}.border-red{border-color:#d73a49 !important}.border-red-light{border-color:#cea0a5 !important}.border-purple{border-color:#6f42c1 !important}.border-yellow{border-color:#d9d0a5 !important}.border-gray-light{border-color:#eaecef !important}.border-gray-dark{border-color:#d1d5da !important}.border-black-fade{border-color:rgba(27,31,35,0.15) !important}.border-top{border-top:1px #e1e4e8 solid !important}.border-right{border-right:1px #e1e4e8 solid !important}.border-bottom{border-bottom:1px #e1e4e8 solid !important}.border-left{border-left:1px #e1e4e8 solid !important}.border-top-0{border-top:0 !important}.border-right-0{border-right:0 !important}.border-bottom-0{border-bottom:0 !important}.border-left-0{border-left:0 !important}.rounded-0{border-radius:0 !important}.rounded-1{border-radius:3px !important}.rounded-2{border-radius:6px !important}.rounded-top-0{border-top-left-radius:0 !important;border-top-right-radius:0 !important}.rounded-top-1{border-top-left-radius:3px !important;border-top-right-radius:3px !important}.rounded-top-2{border-top-left-radius:6px !important;border-top-right-radius:6px !important}.rounded-right-0{border-top-right-radius:0 !important;border-bottom-right-radius:0 !important}.rounded-right-1{border-top-right-radius:3px !important;border-bottom-right-radius:3px !important}.rounded-right-2{border-top-right-radius:6px !important;border-bottom-right-radius:6px !important}.rounded-bottom-0{border-bottom-right-radius:0 !important;border-bottom-left-radius:0 !important}.rounded-bottom-1{border-bottom-right-radius:3px !important;border-bottom-left-radius:3px !important}.rounded-bottom-2{border-bottom-right-radius:6px !important;border-bottom-left-radius:6px !important}.rounded-left-0{border-bottom-left-radius:0 !important;border-top-left-radius:0 !important}.rounded-left-1{border-bottom-left-radius:3px !important;border-top-left-radius:3px !important}.rounded-left-2{border-bottom-left-radius:6px 
!important;border-top-left-radius:6px !important}@media (min-width: 544px){.border-sm-top{border-top:1px #e1e4e8 solid !important}.border-sm-right{border-right:1px #e1e4e8 solid !important}.border-sm-bottom{border-bottom:1px #e1e4e8 solid !important}.border-sm-left{border-left:1px #e1e4e8 solid !important}.border-sm-top-0{border-top:0 !important}.border-sm-right-0{border-right:0 !important}.border-sm-bottom-0{border-bottom:0 !important}.border-sm-left-0{border-left:0 !important}.rounded-sm-0{border-radius:0 !important}.rounded-sm-1{border-radius:3px !important}.rounded-sm-2{border-radius:6px !important}.rounded-sm-top-0{border-top-left-radius:0 !important;border-top-right-radius:0 !important}.rounded-sm-top-1{border-top-left-radius:3px !important;border-top-right-radius:3px !important}.rounded-sm-top-2{border-top-left-radius:6px !important;border-top-right-radius:6px !important}.rounded-sm-right-0{border-top-right-radius:0 !important;border-bottom-right-radius:0 !important}.rounded-sm-right-1{border-top-right-radius:3px !important;border-bottom-right-radius:3px !important}.rounded-sm-right-2{border-top-right-radius:6px !important;border-bottom-right-radius:6px !important}.rounded-sm-bottom-0{border-bottom-right-radius:0 !important;border-bottom-left-radius:0 !important}.rounded-sm-bottom-1{border-bottom-right-radius:3px !important;border-bottom-left-radius:3px !important}.rounded-sm-bottom-2{border-bottom-right-radius:6px !important;border-bottom-left-radius:6px !important}.rounded-sm-left-0{border-bottom-left-radius:0 !important;border-top-left-radius:0 !important}.rounded-sm-left-1{border-bottom-left-radius:3px !important;border-top-left-radius:3px !important}.rounded-sm-left-2{border-bottom-left-radius:6px !important;border-top-left-radius:6px !important}}@media (min-width: 768px){.border-md-top{border-top:1px #e1e4e8 solid !important}.border-md-right{border-right:1px #e1e4e8 solid !important}.border-md-bottom{border-bottom:1px #e1e4e8 solid !important}.border-md-left{border-left:1px #e1e4e8 solid !important}.border-md-top-0{border-top:0 !important}.border-md-right-0{border-right:0 !important}.border-md-bottom-0{border-bottom:0 !important}.border-md-left-0{border-left:0 !important}.rounded-md-0{border-radius:0 !important}.rounded-md-1{border-radius:3px !important}.rounded-md-2{border-radius:6px !important}.rounded-md-top-0{border-top-left-radius:0 !important;border-top-right-radius:0 !important}.rounded-md-top-1{border-top-left-radius:3px !important;border-top-right-radius:3px !important}.rounded-md-top-2{border-top-left-radius:6px !important;border-top-right-radius:6px !important}.rounded-md-right-0{border-top-right-radius:0 !important;border-bottom-right-radius:0 !important}.rounded-md-right-1{border-top-right-radius:3px !important;border-bottom-right-radius:3px !important}.rounded-md-right-2{border-top-right-radius:6px !important;border-bottom-right-radius:6px !important}.rounded-md-bottom-0{border-bottom-right-radius:0 !important;border-bottom-left-radius:0 !important}.rounded-md-bottom-1{border-bottom-right-radius:3px !important;border-bottom-left-radius:3px !important}.rounded-md-bottom-2{border-bottom-right-radius:6px !important;border-bottom-left-radius:6px !important}.rounded-md-left-0{border-bottom-left-radius:0 !important;border-top-left-radius:0 !important}.rounded-md-left-1{border-bottom-left-radius:3px !important;border-top-left-radius:3px !important}.rounded-md-left-2{border-bottom-left-radius:6px !important;border-top-left-radius:6px !important}}@media (min-width: 
1012px){.border-lg-top{border-top:1px #e1e4e8 solid !important}.border-lg-right{border-right:1px #e1e4e8 solid !important}.border-lg-bottom{border-bottom:1px #e1e4e8 solid !important}.border-lg-left{border-left:1px #e1e4e8 solid !important}.border-lg-top-0{border-top:0 !important}.border-lg-right-0{border-right:0 !important}.border-lg-bottom-0{border-bottom:0 !important}.border-lg-left-0{border-left:0 !important}.rounded-lg-0{border-radius:0 !important}.rounded-lg-1{border-radius:3px !important}.rounded-lg-2{border-radius:6px !important}.rounded-lg-top-0{border-top-left-radius:0 !important;border-top-right-radius:0 !important}.rounded-lg-top-1{border-top-left-radius:3px !important;border-top-right-radius:3px !important}.rounded-lg-top-2{border-top-left-radius:6px !important;border-top-right-radius:6px !important}.rounded-lg-right-0{border-top-right-radius:0 !important;border-bottom-right-radius:0 !important}.rounded-lg-right-1{border-top-right-radius:3px !important;border-bottom-right-radius:3px !important}.rounded-lg-right-2{border-top-right-radius:6px !important;border-bottom-right-radius:6px !important}.rounded-lg-bottom-0{border-bottom-right-radius:0 !important;border-bottom-left-radius:0 !important}.rounded-lg-bottom-1{border-bottom-right-radius:3px !important;border-bottom-left-radius:3px !important}.rounded-lg-bottom-2{border-bottom-right-radius:6px !important;border-bottom-left-radius:6px !important}.rounded-lg-left-0{border-bottom-left-radius:0 !important;border-top-left-radius:0 !important}.rounded-lg-left-1{border-bottom-left-radius:3px !important;border-top-left-radius:3px !important}.rounded-lg-left-2{border-bottom-left-radius:6px !important;border-top-left-radius:6px !important}}@media (min-width: 1280px){.border-xl-top{border-top:1px #e1e4e8 solid !important}.border-xl-right{border-right:1px #e1e4e8 solid !important}.border-xl-bottom{border-bottom:1px #e1e4e8 solid !important}.border-xl-left{border-left:1px #e1e4e8 solid !important}.border-xl-top-0{border-top:0 !important}.border-xl-right-0{border-right:0 !important}.border-xl-bottom-0{border-bottom:0 !important}.border-xl-left-0{border-left:0 !important}.rounded-xl-0{border-radius:0 !important}.rounded-xl-1{border-radius:3px !important}.rounded-xl-2{border-radius:6px !important}.rounded-xl-top-0{border-top-left-radius:0 !important;border-top-right-radius:0 !important}.rounded-xl-top-1{border-top-left-radius:3px !important;border-top-right-radius:3px !important}.rounded-xl-top-2{border-top-left-radius:6px !important;border-top-right-radius:6px !important}.rounded-xl-right-0{border-top-right-radius:0 !important;border-bottom-right-radius:0 !important}.rounded-xl-right-1{border-top-right-radius:3px !important;border-bottom-right-radius:3px !important}.rounded-xl-right-2{border-top-right-radius:6px !important;border-bottom-right-radius:6px !important}.rounded-xl-bottom-0{border-bottom-right-radius:0 !important;border-bottom-left-radius:0 !important}.rounded-xl-bottom-1{border-bottom-right-radius:3px !important;border-bottom-left-radius:3px !important}.rounded-xl-bottom-2{border-bottom-right-radius:6px !important;border-bottom-left-radius:6px !important}.rounded-xl-left-0{border-bottom-left-radius:0 !important;border-top-left-radius:0 !important}.rounded-xl-left-1{border-bottom-left-radius:3px !important;border-top-left-radius:3px !important}.rounded-xl-left-2{border-bottom-left-radius:6px !important;border-top-left-radius:6px !important}}.circle{border-radius:50% !important}.box-shadow{box-shadow:0 1px 1px rgba(27,31,35,0.1) 
!important}.box-shadow-medium{box-shadow:0 1px 5px rgba(27,31,35,0.15) !important}.box-shadow-large{box-shadow:0 1px 15px rgba(27,31,35,0.15) !important}.box-shadow-extra-large{box-shadow:0 10px 50px rgba(27,31,35,0.07) !important}.box-shadow-none{box-shadow:none !important}.bg-white{background-color:#fff !important}.bg-blue{background-color:#0366d6 !important}.bg-blue-light{background-color:#f1f8ff !important}.bg-gray-dark{background-color:#24292e !important}.bg-gray{background-color:#f6f8fa !important}.bg-gray-light{background-color:#fafbfc !important}.bg-green{background-color:#28a745 !important}.bg-green-light{background-color:#dcffe4 !important}.bg-red{background-color:#d73a49 !important}.bg-red-light{background-color:#ffdce0 !important}.bg-yellow{background-color:#ffd33d !important}.bg-yellow-light{background-color:#fff5b1 !important}.bg-purple{background-color:#6f42c1 !important}.bg-purple-light{background-color:#f5f0ff !important}.bg-shade-gradient{background-image:linear-gradient(180deg, rgba(27,31,35,0.065), rgba(27,31,35,0)) !important;background-repeat:no-repeat !important;background-size:100% 200px !important}.text-blue{color:#0366d6 !important}.text-red{color:#cb2431 !important}.text-gray-light{color:#6a737d !important}.text-gray{color:#586069 !important}.text-gray-dark{color:#24292e !important}.text-green{color:#28a745 !important}.text-orange{color:#a04100 !important}.text-orange-light{color:#e36209 !important}.text-purple{color:#6f42c1 !important}.text-white{color:#fff !important}.text-inherit{color:inherit !important}.text-pending{color:#b08800 !important}.bg-pending{color:#dbab09 !important}.link-gray{color:#586069 !important}.link-gray:hover{color:#0366d6 !important}.link-gray-dark{color:#24292e !important}.link-gray-dark:hover{color:#0366d6 !important}.link-hover-blue:hover{color:#0366d6 !important}.muted-link{color:#586069 !important}.muted-link:hover{color:#0366d6 !important;text-decoration:none}.details-overlay[open]>summary::before{position:fixed;top:0;right:0;bottom:0;left:0;z-index:80;display:block;cursor:default;content:" ";background:transparent}.details-overlay-dark[open]>summary::before{z-index:99;background:rgba(27,31,35,0.5)}.flex-row{flex-direction:row !important}.flex-row-reverse{flex-direction:row-reverse !important}.flex-column{flex-direction:column !important}.flex-wrap{flex-wrap:wrap !important}.flex-nowrap{flex-wrap:nowrap !important}.flex-justify-start{justify-content:flex-start !important}.flex-justify-end{justify-content:flex-end !important}.flex-justify-center{justify-content:center !important}.flex-justify-between{justify-content:space-between !important}.flex-justify-around{justify-content:space-around !important}.flex-items-start{align-items:flex-start !important}.flex-items-end{align-items:flex-end !important}.flex-items-center{align-items:center !important}.flex-items-baseline{align-items:baseline !important}.flex-items-stretch{align-items:stretch !important}.flex-content-start{align-content:flex-start !important}.flex-content-end{align-content:flex-end !important}.flex-content-center{align-content:center !important}.flex-content-between{align-content:space-between !important}.flex-content-around{align-content:space-around !important}.flex-content-stretch{align-content:stretch !important}.flex-auto{flex:1 1 auto !important}.flex-shrink-0{flex-shrink:0 !important}.flex-self-auto{align-self:auto !important}.flex-self-start{align-self:flex-start !important}.flex-self-end{align-self:flex-end !important}.flex-self-center{align-self:center 
!important}.flex-self-baseline{align-self:baseline !important}.flex-self-stretch{align-self:stretch !important}.flex-item-equal{flex-grow:1;flex-basis:0}@media (min-width: 544px){.flex-sm-row{flex-direction:row !important}.flex-sm-row-reverse{flex-direction:row-reverse !important}.flex-sm-column{flex-direction:column !important}.flex-sm-wrap{flex-wrap:wrap !important}.flex-sm-nowrap{flex-wrap:nowrap !important}.flex-sm-justify-start{justify-content:flex-start !important}.flex-sm-justify-end{justify-content:flex-end !important}.flex-sm-justify-center{justify-content:center !important}.flex-sm-justify-between{justify-content:space-between !important}.flex-sm-justify-around{justify-content:space-around !important}.flex-sm-items-start{align-items:flex-start !important}.flex-sm-items-end{align-items:flex-end !important}.flex-sm-items-center{align-items:center !important}.flex-sm-items-baseline{align-items:baseline !important}.flex-sm-items-stretch{align-items:stretch !important}.flex-sm-content-start{align-content:flex-start !important}.flex-sm-content-end{align-content:flex-end !important}.flex-sm-content-center{align-content:center !important}.flex-sm-content-between{align-content:space-between !important}.flex-sm-content-around{align-content:space-around !important}.flex-sm-content-stretch{align-content:stretch !important}.flex-sm-auto{flex:1 1 auto !important}.flex-sm-shrink-0{flex-shrink:0 !important}.flex-sm-self-auto{align-self:auto !important}.flex-sm-self-start{align-self:flex-start !important}.flex-sm-self-end{align-self:flex-end !important}.flex-sm-self-center{align-self:center !important}.flex-sm-self-baseline{align-self:baseline !important}.flex-sm-self-stretch{align-self:stretch !important}.flex-sm-item-equal{flex-grow:1;flex-basis:0}}@media (min-width: 768px){.flex-md-row{flex-direction:row !important}.flex-md-row-reverse{flex-direction:row-reverse !important}.flex-md-column{flex-direction:column !important}.flex-md-wrap{flex-wrap:wrap !important}.flex-md-nowrap{flex-wrap:nowrap !important}.flex-md-justify-start{justify-content:flex-start !important}.flex-md-justify-end{justify-content:flex-end !important}.flex-md-justify-center{justify-content:center !important}.flex-md-justify-between{justify-content:space-between !important}.flex-md-justify-around{justify-content:space-around !important}.flex-md-items-start{align-items:flex-start !important}.flex-md-items-end{align-items:flex-end !important}.flex-md-items-center{align-items:center !important}.flex-md-items-baseline{align-items:baseline !important}.flex-md-items-stretch{align-items:stretch !important}.flex-md-content-start{align-content:flex-start !important}.flex-md-content-end{align-content:flex-end !important}.flex-md-content-center{align-content:center !important}.flex-md-content-between{align-content:space-between !important}.flex-md-content-around{align-content:space-around !important}.flex-md-content-stretch{align-content:stretch !important}.flex-md-auto{flex:1 1 auto !important}.flex-md-shrink-0{flex-shrink:0 !important}.flex-md-self-auto{align-self:auto !important}.flex-md-self-start{align-self:flex-start !important}.flex-md-self-end{align-self:flex-end !important}.flex-md-self-center{align-self:center !important}.flex-md-self-baseline{align-self:baseline !important}.flex-md-self-stretch{align-self:stretch !important}.flex-md-item-equal{flex-grow:1;flex-basis:0}}@media (min-width: 1012px){.flex-lg-row{flex-direction:row !important}.flex-lg-row-reverse{flex-direction:row-reverse 
!important}.flex-lg-column{flex-direction:column !important}.flex-lg-wrap{flex-wrap:wrap !important}.flex-lg-nowrap{flex-wrap:nowrap !important}.flex-lg-justify-start{justify-content:flex-start !important}.flex-lg-justify-end{justify-content:flex-end !important}.flex-lg-justify-center{justify-content:center !important}.flex-lg-justify-between{justify-content:space-between !important}.flex-lg-justify-around{justify-content:space-around !important}.flex-lg-items-start{align-items:flex-start !important}.flex-lg-items-end{align-items:flex-end !important}.flex-lg-items-center{align-items:center !important}.flex-lg-items-baseline{align-items:baseline !important}.flex-lg-items-stretch{align-items:stretch !important}.flex-lg-content-start{align-content:flex-start !important}.flex-lg-content-end{align-content:flex-end !important}.flex-lg-content-center{align-content:center !important}.flex-lg-content-between{align-content:space-between !important}.flex-lg-content-around{align-content:space-around !important}.flex-lg-content-stretch{align-content:stretch !important}.flex-lg-auto{flex:1 1 auto !important}.flex-lg-shrink-0{flex-shrink:0 !important}.flex-lg-self-auto{align-self:auto !important}.flex-lg-self-start{align-self:flex-start !important}.flex-lg-self-end{align-self:flex-end !important}.flex-lg-self-center{align-self:center !important}.flex-lg-self-baseline{align-self:baseline !important}.flex-lg-self-stretch{align-self:stretch !important}.flex-lg-item-equal{flex-grow:1;flex-basis:0}}@media (min-width: 1280px){.flex-xl-row{flex-direction:row !important}.flex-xl-row-reverse{flex-direction:row-reverse !important}.flex-xl-column{flex-direction:column !important}.flex-xl-wrap{flex-wrap:wrap !important}.flex-xl-nowrap{flex-wrap:nowrap !important}.flex-xl-justify-start{justify-content:flex-start !important}.flex-xl-justify-end{justify-content:flex-end !important}.flex-xl-justify-center{justify-content:center !important}.flex-xl-justify-between{justify-content:space-between !important}.flex-xl-justify-around{justify-content:space-around !important}.flex-xl-items-start{align-items:flex-start !important}.flex-xl-items-end{align-items:flex-end !important}.flex-xl-items-center{align-items:center !important}.flex-xl-items-baseline{align-items:baseline !important}.flex-xl-items-stretch{align-items:stretch !important}.flex-xl-content-start{align-content:flex-start !important}.flex-xl-content-end{align-content:flex-end !important}.flex-xl-content-center{align-content:center !important}.flex-xl-content-between{align-content:space-between !important}.flex-xl-content-around{align-content:space-around !important}.flex-xl-content-stretch{align-content:stretch !important}.flex-xl-auto{flex:1 1 auto !important}.flex-xl-shrink-0{flex-shrink:0 !important}.flex-xl-self-auto{align-self:auto !important}.flex-xl-self-start{align-self:flex-start !important}.flex-xl-self-end{align-self:flex-end !important}.flex-xl-self-center{align-self:center !important}.flex-xl-self-baseline{align-self:baseline !important}.flex-xl-self-stretch{align-self:stretch !important}.flex-xl-item-equal{flex-grow:1;flex-basis:0}}.position-static{position:static !important}.position-relative{position:relative !important}.position-absolute{position:absolute !important}.position-fixed{position:fixed !important}.top-0{top:0 !important}.right-0{right:0 !important}.bottom-0{bottom:0 !important}.left-0{left:0 !important}.v-align-middle{vertical-align:middle !important}.v-align-top{vertical-align:top !important}.v-align-bottom{vertical-align:bottom 
!important}.v-align-text-top{vertical-align:text-top !important}.v-align-text-bottom{vertical-align:text-bottom !important}.v-align-baseline{vertical-align:baseline !important}.overflow-hidden{overflow:hidden !important}.overflow-scroll{overflow:scroll !important}.overflow-auto{overflow:auto !important}.clearfix::before{display:table;content:""}.clearfix::after{display:table;clear:both;content:""}.float-left{float:left !important}.float-right{float:right !important}.float-none{float:none !important}@media (min-width: 544px){.float-sm-left{float:left !important}.float-sm-right{float:right !important}.float-sm-none{float:none !important}}@media (min-width: 768px){.float-md-left{float:left !important}.float-md-right{float:right !important}.float-md-none{float:none !important}}@media (min-width: 1012px){.float-lg-left{float:left !important}.float-lg-right{float:right !important}.float-lg-none{float:none !important}}@media (min-width: 1280px){.float-xl-left{float:left !important}.float-xl-right{float:right !important}.float-xl-none{float:none !important}}.width-fit{max-width:100% !important}.width-full{width:100% !important}.height-fit{max-height:100% !important}.height-full{height:100% !important}.min-width-0{min-width:0 !important}.direction-rtl{direction:rtl !important}.direction-ltr{direction:ltr !important}@media (min-width: 544px){.direction-sm-rtl{direction:rtl !important}.direction-sm-ltr{direction:ltr !important}}@media (min-width: 768px){.direction-md-rtl{direction:rtl !important}.direction-md-ltr{direction:ltr !important}}@media (min-width: 1012px){.direction-lg-rtl{direction:rtl !important}.direction-lg-ltr{direction:ltr !important}}@media (min-width: 1280px){.direction-xl-rtl{direction:rtl !important}.direction-xl-ltr{direction:ltr !important}}.m-0{margin:0 !important}.mt-0{margin-top:0 !important}.mr-0{margin-right:0 !important}.mb-0{margin-bottom:0 !important}.ml-0{margin-left:0 !important}.mx-0{margin-right:0 !important;margin-left:0 !important}.my-0{margin-top:0 !important;margin-bottom:0 !important}.m-1{margin:4px !important}.mt-1{margin-top:4px !important}.mr-1{margin-right:4px !important}.mb-1{margin-bottom:4px !important}.ml-1{margin-left:4px !important}.mt-n1{margin-top:-4px !important}.mr-n1{margin-right:-4px !important}.mb-n1{margin-bottom:-4px !important}.ml-n1{margin-left:-4px !important}.mx-1{margin-right:4px !important;margin-left:4px !important}.my-1{margin-top:4px !important;margin-bottom:4px !important}.m-2{margin:8px !important}.mt-2{margin-top:8px !important}.mr-2{margin-right:8px !important}.mb-2{margin-bottom:8px !important}.ml-2{margin-left:8px !important}.mt-n2{margin-top:-8px !important}.mr-n2{margin-right:-8px !important}.mb-n2{margin-bottom:-8px !important}.ml-n2{margin-left:-8px !important}.mx-2{margin-right:8px !important;margin-left:8px !important}.my-2{margin-top:8px !important;margin-bottom:8px !important}.m-3{margin:16px !important}.mt-3{margin-top:16px !important}.mr-3{margin-right:16px !important}.mb-3{margin-bottom:16px !important}.ml-3{margin-left:16px !important}.mt-n3{margin-top:-16px !important}.mr-n3{margin-right:-16px !important}.mb-n3{margin-bottom:-16px !important}.ml-n3{margin-left:-16px !important}.mx-3{margin-right:16px !important;margin-left:16px !important}.my-3{margin-top:16px !important;margin-bottom:16px !important}.m-4{margin:24px !important}.mt-4{margin-top:24px !important}.mr-4{margin-right:24px !important}.mb-4{margin-bottom:24px !important}.ml-4{margin-left:24px !important}.mt-n4{margin-top:-24px 
!important}.mr-n4{margin-right:-24px !important}.mb-n4{margin-bottom:-24px !important}.ml-n4{margin-left:-24px !important}.mx-4{margin-right:24px !important;margin-left:24px !important}.my-4{margin-top:24px !important;margin-bottom:24px !important}.m-5{margin:32px !important}.mt-5{margin-top:32px !important}.mr-5{margin-right:32px !important}.mb-5{margin-bottom:32px !important}.ml-5{margin-left:32px !important}.mt-n5{margin-top:-32px !important}.mr-n5{margin-right:-32px !important}.mb-n5{margin-bottom:-32px !important}.ml-n5{margin-left:-32px !important}.mx-5{margin-right:32px !important;margin-left:32px !important}.my-5{margin-top:32px !important;margin-bottom:32px !important}.m-6{margin:40px !important}.mt-6{margin-top:40px !important}.mr-6{margin-right:40px !important}.mb-6{margin-bottom:40px !important}.ml-6{margin-left:40px !important}.mt-n6{margin-top:-40px !important}.mr-n6{margin-right:-40px !important}.mb-n6{margin-bottom:-40px !important}.ml-n6{margin-left:-40px !important}.mx-6{margin-right:40px !important;margin-left:40px !important}.my-6{margin-top:40px !important;margin-bottom:40px !important}.mx-auto{margin-right:auto !important;margin-left:auto !important}@media (min-width: 544px){.m-sm-0{margin:0 !important}.mt-sm-0{margin-top:0 !important}.mr-sm-0{margin-right:0 !important}.mb-sm-0{margin-bottom:0 !important}.ml-sm-0{margin-left:0 !important}.mx-sm-0{margin-right:0 !important;margin-left:0 !important}.my-sm-0{margin-top:0 !important;margin-bottom:0 !important}.m-sm-1{margin:4px !important}.mt-sm-1{margin-top:4px !important}.mr-sm-1{margin-right:4px !important}.mb-sm-1{margin-bottom:4px !important}.ml-sm-1{margin-left:4px !important}.mt-sm-n1{margin-top:-4px !important}.mr-sm-n1{margin-right:-4px !important}.mb-sm-n1{margin-bottom:-4px !important}.ml-sm-n1{margin-left:-4px !important}.mx-sm-1{margin-right:4px !important;margin-left:4px !important}.my-sm-1{margin-top:4px !important;margin-bottom:4px !important}.m-sm-2{margin:8px !important}.mt-sm-2{margin-top:8px !important}.mr-sm-2{margin-right:8px !important}.mb-sm-2{margin-bottom:8px !important}.ml-sm-2{margin-left:8px !important}.mt-sm-n2{margin-top:-8px !important}.mr-sm-n2{margin-right:-8px !important}.mb-sm-n2{margin-bottom:-8px !important}.ml-sm-n2{margin-left:-8px !important}.mx-sm-2{margin-right:8px !important;margin-left:8px !important}.my-sm-2{margin-top:8px !important;margin-bottom:8px !important}.m-sm-3{margin:16px !important}.mt-sm-3{margin-top:16px !important}.mr-sm-3{margin-right:16px !important}.mb-sm-3{margin-bottom:16px !important}.ml-sm-3{margin-left:16px !important}.mt-sm-n3{margin-top:-16px !important}.mr-sm-n3{margin-right:-16px !important}.mb-sm-n3{margin-bottom:-16px !important}.ml-sm-n3{margin-left:-16px !important}.mx-sm-3{margin-right:16px !important;margin-left:16px !important}.my-sm-3{margin-top:16px !important;margin-bottom:16px !important}.m-sm-4{margin:24px !important}.mt-sm-4{margin-top:24px !important}.mr-sm-4{margin-right:24px !important}.mb-sm-4{margin-bottom:24px !important}.ml-sm-4{margin-left:24px !important}.mt-sm-n4{margin-top:-24px !important}.mr-sm-n4{margin-right:-24px !important}.mb-sm-n4{margin-bottom:-24px !important}.ml-sm-n4{margin-left:-24px !important}.mx-sm-4{margin-right:24px !important;margin-left:24px !important}.my-sm-4{margin-top:24px !important;margin-bottom:24px !important}.m-sm-5{margin:32px !important}.mt-sm-5{margin-top:32px !important}.mr-sm-5{margin-right:32px !important}.mb-sm-5{margin-bottom:32px !important}.ml-sm-5{margin-left:32px 
!important}.mt-sm-n5{margin-top:-32px !important}.mr-sm-n5{margin-right:-32px !important}.mb-sm-n5{margin-bottom:-32px !important}.ml-sm-n5{margin-left:-32px !important}.mx-sm-5{margin-right:32px !important;margin-left:32px !important}.my-sm-5{margin-top:32px !important;margin-bottom:32px !important}.m-sm-6{margin:40px !important}.mt-sm-6{margin-top:40px !important}.mr-sm-6{margin-right:40px !important}.mb-sm-6{margin-bottom:40px !important}.ml-sm-6{margin-left:40px !important}.mt-sm-n6{margin-top:-40px !important}.mr-sm-n6{margin-right:-40px !important}.mb-sm-n6{margin-bottom:-40px !important}.ml-sm-n6{margin-left:-40px !important}.mx-sm-6{margin-right:40px !important;margin-left:40px !important}.my-sm-6{margin-top:40px !important;margin-bottom:40px !important}.mx-sm-auto{margin-right:auto !important;margin-left:auto !important}}@media (min-width: 768px){.m-md-0{margin:0 !important}.mt-md-0{margin-top:0 !important}.mr-md-0{margin-right:0 !important}.mb-md-0{margin-bottom:0 !important}.ml-md-0{margin-left:0 !important}.mx-md-0{margin-right:0 !important;margin-left:0 !important}.my-md-0{margin-top:0 !important;margin-bottom:0 !important}.m-md-1{margin:4px !important}.mt-md-1{margin-top:4px !important}.mr-md-1{margin-right:4px !important}.mb-md-1{margin-bottom:4px !important}.ml-md-1{margin-left:4px !important}.mt-md-n1{margin-top:-4px !important}.mr-md-n1{margin-right:-4px !important}.mb-md-n1{margin-bottom:-4px !important}.ml-md-n1{margin-left:-4px !important}.mx-md-1{margin-right:4px !important;margin-left:4px !important}.my-md-1{margin-top:4px !important;margin-bottom:4px !important}.m-md-2{margin:8px !important}.mt-md-2{margin-top:8px !important}.mr-md-2{margin-right:8px !important}.mb-md-2{margin-bottom:8px !important}.ml-md-2{margin-left:8px !important}.mt-md-n2{margin-top:-8px !important}.mr-md-n2{margin-right:-8px !important}.mb-md-n2{margin-bottom:-8px !important}.ml-md-n2{margin-left:-8px !important}.mx-md-2{margin-right:8px !important;margin-left:8px !important}.my-md-2{margin-top:8px !important;margin-bottom:8px !important}.m-md-3{margin:16px !important}.mt-md-3{margin-top:16px !important}.mr-md-3{margin-right:16px !important}.mb-md-3{margin-bottom:16px !important}.ml-md-3{margin-left:16px !important}.mt-md-n3{margin-top:-16px !important}.mr-md-n3{margin-right:-16px !important}.mb-md-n3{margin-bottom:-16px !important}.ml-md-n3{margin-left:-16px !important}.mx-md-3{margin-right:16px !important;margin-left:16px !important}.my-md-3{margin-top:16px !important;margin-bottom:16px !important}.m-md-4{margin:24px !important}.mt-md-4{margin-top:24px !important}.mr-md-4{margin-right:24px !important}.mb-md-4{margin-bottom:24px !important}.ml-md-4{margin-left:24px !important}.mt-md-n4{margin-top:-24px !important}.mr-md-n4{margin-right:-24px !important}.mb-md-n4{margin-bottom:-24px !important}.ml-md-n4{margin-left:-24px !important}.mx-md-4{margin-right:24px !important;margin-left:24px !important}.my-md-4{margin-top:24px !important;margin-bottom:24px !important}.m-md-5{margin:32px !important}.mt-md-5{margin-top:32px !important}.mr-md-5{margin-right:32px !important}.mb-md-5{margin-bottom:32px !important}.ml-md-5{margin-left:32px !important}.mt-md-n5{margin-top:-32px !important}.mr-md-n5{margin-right:-32px !important}.mb-md-n5{margin-bottom:-32px !important}.ml-md-n5{margin-left:-32px !important}.mx-md-5{margin-right:32px !important;margin-left:32px !important}.my-md-5{margin-top:32px !important;margin-bottom:32px !important}.m-md-6{margin:40px !important}.mt-md-6{margin-top:40px 
!important}.mr-md-6{margin-right:40px !important}.mb-md-6{margin-bottom:40px !important}.ml-md-6{margin-left:40px !important}.mt-md-n6{margin-top:-40px !important}.mr-md-n6{margin-right:-40px !important}.mb-md-n6{margin-bottom:-40px !important}.ml-md-n6{margin-left:-40px !important}.mx-md-6{margin-right:40px !important;margin-left:40px !important}.my-md-6{margin-top:40px !important;margin-bottom:40px !important}.mx-md-auto{margin-right:auto !important;margin-left:auto !important}}@media (min-width: 1012px){.m-lg-0{margin:0 !important}.mt-lg-0{margin-top:0 !important}.mr-lg-0{margin-right:0 !important}.mb-lg-0{margin-bottom:0 !important}.ml-lg-0{margin-left:0 !important}.mx-lg-0{margin-right:0 !important;margin-left:0 !important}.my-lg-0{margin-top:0 !important;margin-bottom:0 !important}.m-lg-1{margin:4px !important}.mt-lg-1{margin-top:4px !important}.mr-lg-1{margin-right:4px !important}.mb-lg-1{margin-bottom:4px !important}.ml-lg-1{margin-left:4px !important}.mt-lg-n1{margin-top:-4px !important}.mr-lg-n1{margin-right:-4px !important}.mb-lg-n1{margin-bottom:-4px !important}.ml-lg-n1{margin-left:-4px !important}.mx-lg-1{margin-right:4px !important;margin-left:4px !important}.my-lg-1{margin-top:4px !important;margin-bottom:4px !important}.m-lg-2{margin:8px !important}.mt-lg-2{margin-top:8px !important}.mr-lg-2{margin-right:8px !important}.mb-lg-2{margin-bottom:8px !important}.ml-lg-2{margin-left:8px !important}.mt-lg-n2{margin-top:-8px !important}.mr-lg-n2{margin-right:-8px !important}.mb-lg-n2{margin-bottom:-8px !important}.ml-lg-n2{margin-left:-8px !important}.mx-lg-2{margin-right:8px !important;margin-left:8px !important}.my-lg-2{margin-top:8px !important;margin-bottom:8px !important}.m-lg-3{margin:16px !important}.mt-lg-3{margin-top:16px !important}.mr-lg-3{margin-right:16px !important}.mb-lg-3{margin-bottom:16px !important}.ml-lg-3{margin-left:16px !important}.mt-lg-n3{margin-top:-16px !important}.mr-lg-n3{margin-right:-16px !important}.mb-lg-n3{margin-bottom:-16px !important}.ml-lg-n3{margin-left:-16px !important}.mx-lg-3{margin-right:16px !important;margin-left:16px !important}.my-lg-3{margin-top:16px !important;margin-bottom:16px !important}.m-lg-4{margin:24px !important}.mt-lg-4{margin-top:24px !important}.mr-lg-4{margin-right:24px !important}.mb-lg-4{margin-bottom:24px !important}.ml-lg-4{margin-left:24px !important}.mt-lg-n4{margin-top:-24px !important}.mr-lg-n4{margin-right:-24px !important}.mb-lg-n4{margin-bottom:-24px !important}.ml-lg-n4{margin-left:-24px !important}.mx-lg-4{margin-right:24px !important;margin-left:24px !important}.my-lg-4{margin-top:24px !important;margin-bottom:24px !important}.m-lg-5{margin:32px !important}.mt-lg-5{margin-top:32px !important}.mr-lg-5{margin-right:32px !important}.mb-lg-5{margin-bottom:32px !important}.ml-lg-5{margin-left:32px !important}.mt-lg-n5{margin-top:-32px !important}.mr-lg-n5{margin-right:-32px !important}.mb-lg-n5{margin-bottom:-32px !important}.ml-lg-n5{margin-left:-32px !important}.mx-lg-5{margin-right:32px !important;margin-left:32px !important}.my-lg-5{margin-top:32px !important;margin-bottom:32px !important}.m-lg-6{margin:40px !important}.mt-lg-6{margin-top:40px !important}.mr-lg-6{margin-right:40px !important}.mb-lg-6{margin-bottom:40px !important}.ml-lg-6{margin-left:40px !important}.mt-lg-n6{margin-top:-40px !important}.mr-lg-n6{margin-right:-40px !important}.mb-lg-n6{margin-bottom:-40px !important}.ml-lg-n6{margin-left:-40px !important}.mx-lg-6{margin-right:40px !important;margin-left:40px 
!important}.my-lg-6{margin-top:40px !important;margin-bottom:40px !important}.mx-lg-auto{margin-right:auto !important;margin-left:auto !important}}@media (min-width: 1280px){.m-xl-0{margin:0 !important}.mt-xl-0{margin-top:0 !important}.mr-xl-0{margin-right:0 !important}.mb-xl-0{margin-bottom:0 !important}.ml-xl-0{margin-left:0 !important}.mx-xl-0{margin-right:0 !important;margin-left:0 !important}.my-xl-0{margin-top:0 !important;margin-bottom:0 !important}.m-xl-1{margin:4px !important}.mt-xl-1{margin-top:4px !important}.mr-xl-1{margin-right:4px !important}.mb-xl-1{margin-bottom:4px !important}.ml-xl-1{margin-left:4px !important}.mt-xl-n1{margin-top:-4px !important}.mr-xl-n1{margin-right:-4px !important}.mb-xl-n1{margin-bottom:-4px !important}.ml-xl-n1{margin-left:-4px !important}.mx-xl-1{margin-right:4px !important;margin-left:4px !important}.my-xl-1{margin-top:4px !important;margin-bottom:4px !important}.m-xl-2{margin:8px !important}.mt-xl-2{margin-top:8px !important}.mr-xl-2{margin-right:8px !important}.mb-xl-2{margin-bottom:8px !important}.ml-xl-2{margin-left:8px !important}.mt-xl-n2{margin-top:-8px !important}.mr-xl-n2{margin-right:-8px !important}.mb-xl-n2{margin-bottom:-8px !important}.ml-xl-n2{margin-left:-8px !important}.mx-xl-2{margin-right:8px !important;margin-left:8px !important}.my-xl-2{margin-top:8px !important;margin-bottom:8px !important}.m-xl-3{margin:16px !important}.mt-xl-3{margin-top:16px !important}.mr-xl-3{margin-right:16px !important}.mb-xl-3{margin-bottom:16px !important}.ml-xl-3{margin-left:16px !important}.mt-xl-n3{margin-top:-16px !important}.mr-xl-n3{margin-right:-16px !important}.mb-xl-n3{margin-bottom:-16px !important}.ml-xl-n3{margin-left:-16px !important}.mx-xl-3{margin-right:16px !important;margin-left:16px !important}.my-xl-3{margin-top:16px !important;margin-bottom:16px !important}.m-xl-4{margin:24px !important}.mt-xl-4{margin-top:24px !important}.mr-xl-4{margin-right:24px !important}.mb-xl-4{margin-bottom:24px !important}.ml-xl-4{margin-left:24px !important}.mt-xl-n4{margin-top:-24px !important}.mr-xl-n4{margin-right:-24px !important}.mb-xl-n4{margin-bottom:-24px !important}.ml-xl-n4{margin-left:-24px !important}.mx-xl-4{margin-right:24px !important;margin-left:24px !important}.my-xl-4{margin-top:24px !important;margin-bottom:24px !important}.m-xl-5{margin:32px !important}.mt-xl-5{margin-top:32px !important}.mr-xl-5{margin-right:32px !important}.mb-xl-5{margin-bottom:32px !important}.ml-xl-5{margin-left:32px !important}.mt-xl-n5{margin-top:-32px !important}.mr-xl-n5{margin-right:-32px !important}.mb-xl-n5{margin-bottom:-32px !important}.ml-xl-n5{margin-left:-32px !important}.mx-xl-5{margin-right:32px !important;margin-left:32px !important}.my-xl-5{margin-top:32px !important;margin-bottom:32px !important}.m-xl-6{margin:40px !important}.mt-xl-6{margin-top:40px !important}.mr-xl-6{margin-right:40px !important}.mb-xl-6{margin-bottom:40px !important}.ml-xl-6{margin-left:40px !important}.mt-xl-n6{margin-top:-40px !important}.mr-xl-n6{margin-right:-40px !important}.mb-xl-n6{margin-bottom:-40px !important}.ml-xl-n6{margin-left:-40px !important}.mx-xl-6{margin-right:40px !important;margin-left:40px !important}.my-xl-6{margin-top:40px !important;margin-bottom:40px !important}.mx-xl-auto{margin-right:auto !important;margin-left:auto !important}}.p-0{padding:0 !important}.pt-0{padding-top:0 !important}.pr-0{padding-right:0 !important}.pb-0{padding-bottom:0 !important}.pl-0{padding-left:0 !important}.px-0{padding-right:0 !important;padding-left:0 
!important}.py-0{padding-top:0 !important;padding-bottom:0 !important}.p-1{padding:4px !important}.pt-1{padding-top:4px !important}.pr-1{padding-right:4px !important}.pb-1{padding-bottom:4px !important}.pl-1{padding-left:4px !important}.px-1{padding-right:4px !important;padding-left:4px !important}.py-1{padding-top:4px !important;padding-bottom:4px !important}.p-2{padding:8px !important}.pt-2{padding-top:8px !important}.pr-2{padding-right:8px !important}.pb-2{padding-bottom:8px !important}.pl-2{padding-left:8px !important}.px-2{padding-right:8px !important;padding-left:8px !important}.py-2{padding-top:8px !important;padding-bottom:8px !important}.p-3{padding:16px !important}.pt-3{padding-top:16px !important}.pr-3{padding-right:16px !important}.pb-3{padding-bottom:16px !important}.pl-3{padding-left:16px !important}.px-3{padding-right:16px !important;padding-left:16px !important}.py-3{padding-top:16px !important;padding-bottom:16px !important}.p-4{padding:24px !important}.pt-4{padding-top:24px !important}.pr-4{padding-right:24px !important}.pb-4{padding-bottom:24px !important}.pl-4{padding-left:24px !important}.px-4{padding-right:24px !important;padding-left:24px !important}.py-4{padding-top:24px !important;padding-bottom:24px !important}.p-5{padding:32px !important}.pt-5{padding-top:32px !important}.pr-5{padding-right:32px !important}.pb-5{padding-bottom:32px !important}.pl-5{padding-left:32px !important}.px-5{padding-right:32px !important;padding-left:32px !important}.py-5{padding-top:32px !important;padding-bottom:32px !important}.p-6{padding:40px !important}.pt-6{padding-top:40px !important}.pr-6{padding-right:40px !important}.pb-6{padding-bottom:40px !important}.pl-6{padding-left:40px !important}.px-6{padding-right:40px !important;padding-left:40px !important}.py-6{padding-top:40px !important;padding-bottom:40px !important}@media (min-width: 544px){.p-sm-0{padding:0 !important}.pt-sm-0{padding-top:0 !important}.pr-sm-0{padding-right:0 !important}.pb-sm-0{padding-bottom:0 !important}.pl-sm-0{padding-left:0 !important}.px-sm-0{padding-right:0 !important;padding-left:0 !important}.py-sm-0{padding-top:0 !important;padding-bottom:0 !important}.p-sm-1{padding:4px !important}.pt-sm-1{padding-top:4px !important}.pr-sm-1{padding-right:4px !important}.pb-sm-1{padding-bottom:4px !important}.pl-sm-1{padding-left:4px !important}.px-sm-1{padding-right:4px !important;padding-left:4px !important}.py-sm-1{padding-top:4px !important;padding-bottom:4px !important}.p-sm-2{padding:8px !important}.pt-sm-2{padding-top:8px !important}.pr-sm-2{padding-right:8px !important}.pb-sm-2{padding-bottom:8px !important}.pl-sm-2{padding-left:8px !important}.px-sm-2{padding-right:8px !important;padding-left:8px !important}.py-sm-2{padding-top:8px !important;padding-bottom:8px !important}.p-sm-3{padding:16px !important}.pt-sm-3{padding-top:16px !important}.pr-sm-3{padding-right:16px !important}.pb-sm-3{padding-bottom:16px !important}.pl-sm-3{padding-left:16px !important}.px-sm-3{padding-right:16px !important;padding-left:16px !important}.py-sm-3{padding-top:16px !important;padding-bottom:16px !important}.p-sm-4{padding:24px !important}.pt-sm-4{padding-top:24px !important}.pr-sm-4{padding-right:24px !important}.pb-sm-4{padding-bottom:24px !important}.pl-sm-4{padding-left:24px !important}.px-sm-4{padding-right:24px !important;padding-left:24px !important}.py-sm-4{padding-top:24px !important;padding-bottom:24px !important}.p-sm-5{padding:32px !important}.pt-sm-5{padding-top:32px !important}.pr-sm-5{padding-right:32px 
!important}.pb-sm-5{padding-bottom:32px !important}.pl-sm-5{padding-left:32px !important}.px-sm-5{padding-right:32px !important;padding-left:32px !important}.py-sm-5{padding-top:32px !important;padding-bottom:32px !important}.p-sm-6{padding:40px !important}.pt-sm-6{padding-top:40px !important}.pr-sm-6{padding-right:40px !important}.pb-sm-6{padding-bottom:40px !important}.pl-sm-6{padding-left:40px !important}.px-sm-6{padding-right:40px !important;padding-left:40px !important}.py-sm-6{padding-top:40px !important;padding-bottom:40px !important}}@media (min-width: 768px){.p-md-0{padding:0 !important}.pt-md-0{padding-top:0 !important}.pr-md-0{padding-right:0 !important}.pb-md-0{padding-bottom:0 !important}.pl-md-0{padding-left:0 !important}.px-md-0{padding-right:0 !important;padding-left:0 !important}.py-md-0{padding-top:0 !important;padding-bottom:0 !important}.p-md-1{padding:4px !important}.pt-md-1{padding-top:4px !important}.pr-md-1{padding-right:4px !important}.pb-md-1{padding-bottom:4px !important}.pl-md-1{padding-left:4px !important}.px-md-1{padding-right:4px !important;padding-left:4px !important}.py-md-1{padding-top:4px !important;padding-bottom:4px !important}.p-md-2{padding:8px !important}.pt-md-2{padding-top:8px !important}.pr-md-2{padding-right:8px !important}.pb-md-2{padding-bottom:8px !important}.pl-md-2{padding-left:8px !important}.px-md-2{padding-right:8px !important;padding-left:8px !important}.py-md-2{padding-top:8px !important;padding-bottom:8px !important}.p-md-3{padding:16px !important}.pt-md-3{padding-top:16px !important}.pr-md-3{padding-right:16px !important}.pb-md-3{padding-bottom:16px !important}.pl-md-3{padding-left:16px !important}.px-md-3{padding-right:16px !important;padding-left:16px !important}.py-md-3{padding-top:16px !important;padding-bottom:16px !important}.p-md-4{padding:24px !important}.pt-md-4{padding-top:24px !important}.pr-md-4{padding-right:24px !important}.pb-md-4{padding-bottom:24px !important}.pl-md-4{padding-left:24px !important}.px-md-4{padding-right:24px !important;padding-left:24px !important}.py-md-4{padding-top:24px !important;padding-bottom:24px !important}.p-md-5{padding:32px !important}.pt-md-5{padding-top:32px !important}.pr-md-5{padding-right:32px !important}.pb-md-5{padding-bottom:32px !important}.pl-md-5{padding-left:32px !important}.px-md-5{padding-right:32px !important;padding-left:32px !important}.py-md-5{padding-top:32px !important;padding-bottom:32px !important}.p-md-6{padding:40px !important}.pt-md-6{padding-top:40px !important}.pr-md-6{padding-right:40px !important}.pb-md-6{padding-bottom:40px !important}.pl-md-6{padding-left:40px !important}.px-md-6{padding-right:40px !important;padding-left:40px !important}.py-md-6{padding-top:40px !important;padding-bottom:40px !important}}@media (min-width: 1012px){.p-lg-0{padding:0 !important}.pt-lg-0{padding-top:0 !important}.pr-lg-0{padding-right:0 !important}.pb-lg-0{padding-bottom:0 !important}.pl-lg-0{padding-left:0 !important}.px-lg-0{padding-right:0 !important;padding-left:0 !important}.py-lg-0{padding-top:0 !important;padding-bottom:0 !important}.p-lg-1{padding:4px !important}.pt-lg-1{padding-top:4px !important}.pr-lg-1{padding-right:4px !important}.pb-lg-1{padding-bottom:4px !important}.pl-lg-1{padding-left:4px !important}.px-lg-1{padding-right:4px !important;padding-left:4px !important}.py-lg-1{padding-top:4px !important;padding-bottom:4px !important}.p-lg-2{padding:8px !important}.pt-lg-2{padding-top:8px !important}.pr-lg-2{padding-right:8px !important}.pb-lg-2{padding-bottom:8px 
!important}.pl-lg-2{padding-left:8px !important}.px-lg-2{padding-right:8px !important;padding-left:8px !important}.py-lg-2{padding-top:8px !important;padding-bottom:8px !important}.p-lg-3{padding:16px !important}.pt-lg-3{padding-top:16px !important}.pr-lg-3{padding-right:16px !important}.pb-lg-3{padding-bottom:16px !important}.pl-lg-3{padding-left:16px !important}.px-lg-3{padding-right:16px !important;padding-left:16px !important}.py-lg-3{padding-top:16px !important;padding-bottom:16px !important}.p-lg-4{padding:24px !important}.pt-lg-4{padding-top:24px !important}.pr-lg-4{padding-right:24px !important}.pb-lg-4{padding-bottom:24px !important}.pl-lg-4{padding-left:24px !important}.px-lg-4{padding-right:24px !important;padding-left:24px !important}.py-lg-4{padding-top:24px !important;padding-bottom:24px !important}.p-lg-5{padding:32px !important}.pt-lg-5{padding-top:32px !important}.pr-lg-5{padding-right:32px !important}.pb-lg-5{padding-bottom:32px !important}.pl-lg-5{padding-left:32px !important}.px-lg-5{padding-right:32px !important;padding-left:32px !important}.py-lg-5{padding-top:32px !important;padding-bottom:32px !important}.p-lg-6{padding:40px !important}.pt-lg-6{padding-top:40px !important}.pr-lg-6{padding-right:40px !important}.pb-lg-6{padding-bottom:40px !important}.pl-lg-6{padding-left:40px !important}.px-lg-6{padding-right:40px !important;padding-left:40px !important}.py-lg-6{padding-top:40px !important;padding-bottom:40px !important}}@media (min-width: 1280px){.p-xl-0{padding:0 !important}.pt-xl-0{padding-top:0 !important}.pr-xl-0{padding-right:0 !important}.pb-xl-0{padding-bottom:0 !important}.pl-xl-0{padding-left:0 !important}.px-xl-0{padding-right:0 !important;padding-left:0 !important}.py-xl-0{padding-top:0 !important;padding-bottom:0 !important}.p-xl-1{padding:4px !important}.pt-xl-1{padding-top:4px !important}.pr-xl-1{padding-right:4px !important}.pb-xl-1{padding-bottom:4px !important}.pl-xl-1{padding-left:4px !important}.px-xl-1{padding-right:4px !important;padding-left:4px !important}.py-xl-1{padding-top:4px !important;padding-bottom:4px !important}.p-xl-2{padding:8px !important}.pt-xl-2{padding-top:8px !important}.pr-xl-2{padding-right:8px !important}.pb-xl-2{padding-bottom:8px !important}.pl-xl-2{padding-left:8px !important}.px-xl-2{padding-right:8px !important;padding-left:8px !important}.py-xl-2{padding-top:8px !important;padding-bottom:8px !important}.p-xl-3{padding:16px !important}.pt-xl-3{padding-top:16px !important}.pr-xl-3{padding-right:16px !important}.pb-xl-3{padding-bottom:16px !important}.pl-xl-3{padding-left:16px !important}.px-xl-3{padding-right:16px !important;padding-left:16px !important}.py-xl-3{padding-top:16px !important;padding-bottom:16px !important}.p-xl-4{padding:24px !important}.pt-xl-4{padding-top:24px !important}.pr-xl-4{padding-right:24px !important}.pb-xl-4{padding-bottom:24px !important}.pl-xl-4{padding-left:24px !important}.px-xl-4{padding-right:24px !important;padding-left:24px !important}.py-xl-4{padding-top:24px !important;padding-bottom:24px !important}.p-xl-5{padding:32px !important}.pt-xl-5{padding-top:32px !important}.pr-xl-5{padding-right:32px !important}.pb-xl-5{padding-bottom:32px !important}.pl-xl-5{padding-left:32px !important}.px-xl-5{padding-right:32px !important;padding-left:32px !important}.py-xl-5{padding-top:32px !important;padding-bottom:32px !important}.p-xl-6{padding:40px !important}.pt-xl-6{padding-top:40px !important}.pr-xl-6{padding-right:40px !important}.pb-xl-6{padding-bottom:40px 
!important}.pl-xl-6{padding-left:40px !important}.px-xl-6{padding-right:40px !important;padding-left:40px !important}.py-xl-6{padding-top:40px !important;padding-bottom:40px !important}}.p-responsive{padding-right:16px !important;padding-left:16px !important}@media (min-width: 544px){.p-responsive{padding-right:40px !important;padding-left:40px !important}}@media (min-width: 1012px){.p-responsive{padding-right:16px !important;padding-left:16px !important}}.h1{font-size:26px !important}@media (min-width: 768px){.h1{font-size:32px !important}}.h2{font-size:22px !important}@media (min-width: 768px){.h2{font-size:24px !important}}.h3{font-size:18px !important}@media (min-width: 768px){.h3{font-size:20px !important}}.h4{font-size:16px !important}.h5{font-size:14px !important}.h6{font-size:12px !important}.h1,.h2,.h3,.h4,.h5,.h6{font-weight:600 !important}.f1{font-size:26px !important}@media (min-width: 768px){.f1{font-size:32px !important}}.f2{font-size:22px !important}@media (min-width: 768px){.f2{font-size:24px !important}}.f3{font-size:18px !important}@media (min-width: 768px){.f3{font-size:20px !important}}.f4{font-size:16px !important}@media (min-width: 768px){.f4{font-size:16px !important}}.f5{font-size:14px !important}.f6{font-size:12px !important}.f00-light{font-size:40px !important;font-weight:300 !important}@media (min-width: 768px){.f00-light{font-size:48px !important}}.f0-light{font-size:32px !important;font-weight:300 !important}@media (min-width: 768px){.f0-light{font-size:40px !important}}.f1-light{font-size:26px !important;font-weight:300 !important}@media (min-width: 768px){.f1-light{font-size:32px !important}}.f2-light{font-size:22px !important;font-weight:300 !important}@media (min-width: 768px){.f2-light{font-size:24px !important}}.f3-light{font-size:18px !important;font-weight:300 !important}@media (min-width: 768px){.f3-light{font-size:20px !important}}.text-small{font-size:12px !important}.lead{margin-bottom:30px;font-size:20px;font-weight:300;color:#586069}.lh-condensed-ultra{line-height:1 !important}.lh-condensed{line-height:1.25 !important}.lh-default{line-height:1.5 !important}.lh-0{line-height:0 !important}.text-right{text-align:right !important}.text-left{text-align:left !important}.text-center{text-align:center !important}@media (min-width: 544px){.text-sm-right{text-align:right !important}.text-sm-left{text-align:left !important}.text-sm-center{text-align:center !important}}@media (min-width: 768px){.text-md-right{text-align:right !important}.text-md-left{text-align:left !important}.text-md-center{text-align:center !important}}@media (min-width: 1012px){.text-lg-right{text-align:right !important}.text-lg-left{text-align:left !important}.text-lg-center{text-align:center !important}}@media (min-width: 1280px){.text-xl-right{text-align:right !important}.text-xl-left{text-align:left !important}.text-xl-center{text-align:center !important}}.text-normal{font-weight:400 !important}.text-bold{font-weight:600 !important}.text-italic{font-style:italic !important}.text-uppercase{text-transform:uppercase !important}.text-underline{text-decoration:underline !important}.no-underline{text-decoration:none !important}.no-wrap{white-space:nowrap !important}.ws-normal{white-space:normal !important}.wb-break-all{word-break:break-all !important}.text-emphasized{font-weight:600;color:#24292e}.list-style-none{list-style:none !important}.text-shadow-dark{text-shadow:0 1px 1px rgba(27,31,35,0.25),0 1px 25px rgba(27,31,35,0.75)}.text-shadow-light{text-shadow:0 1px 0 
rgba(255,255,255,0.5)}.text-mono{font-family:"SFMono-Regular",Consolas,"Liberation Mono",Menlo,Courier,monospace}.user-select-none{-webkit-user-select:none !important;-moz-user-select:none !important;-ms-user-select:none !important;user-select:none !important}.d-block{display:block !important}.d-flex{display:flex !important}.d-inline{display:inline !important}.d-inline-block{display:inline-block !important}.d-inline-flex{display:inline-flex !important}.d-none{display:none !important}.d-table{display:table !important}.d-table-cell{display:table-cell !important}@media (min-width: 544px){.d-sm-block{display:block !important}.d-sm-flex{display:flex !important}.d-sm-inline{display:inline !important}.d-sm-inline-block{display:inline-block !important}.d-sm-inline-flex{display:inline-flex !important}.d-sm-none{display:none !important}.d-sm-table{display:table !important}.d-sm-table-cell{display:table-cell !important}}@media (min-width: 768px){.d-md-block{display:block !important}.d-md-flex{display:flex !important}.d-md-inline{display:inline !important}.d-md-inline-block{display:inline-block !important}.d-md-inline-flex{display:inline-flex !important}.d-md-none{display:none !important}.d-md-table{display:table !important}.d-md-table-cell{display:table-cell !important}}@media (min-width: 1012px){.d-lg-block{display:block !important}.d-lg-flex{display:flex !important}.d-lg-inline{display:inline !important}.d-lg-inline-block{display:inline-block !important}.d-lg-inline-flex{display:inline-flex !important}.d-lg-none{display:none !important}.d-lg-table{display:table !important}.d-lg-table-cell{display:table-cell !important}}@media (min-width: 1280px){.d-xl-block{display:block !important}.d-xl-flex{display:flex !important}.d-xl-inline{display:inline !important}.d-xl-inline-block{display:inline-block !important}.d-xl-inline-flex{display:inline-flex !important}.d-xl-none{display:none !important}.d-xl-table{display:table !important}.d-xl-table-cell{display:table-cell !important}}.v-hidden{visibility:hidden !important}.v-visible{visibility:visible !important}@media (max-width: 544px){.hide-sm{display:none !important}}@media (min-width: 544px) and (max-width: 768px){.hide-md{display:none !important}}@media (min-width: 768px) and (max-width: 1012px){.hide-lg{display:none !important}}@media (min-width: 1012px){.hide-xl{display:none !important}}.table-fixed{table-layout:fixed !important}.sr-only{position:absolute;width:1px;height:1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);word-wrap:normal;border:0}.show-on-focus{position:absolute;width:1px;height:1px;margin:0;overflow:hidden;clip:rect(1px, 1px, 1px, 
1px)}.show-on-focus:focus{z-index:20;width:auto;height:auto;clip:auto}.container{width:980px;margin-right:auto;margin-left:auto}.container::before{display:table;content:""}.container::after{display:table;clear:both;content:""}.container-md{max-width:768px;margin-right:auto;margin-left:auto}.container-lg{max-width:1012px;margin-right:auto;margin-left:auto}.container-xl{max-width:1280px;margin-right:auto;margin-left:auto}.columns{margin-right:-10px;margin-left:-10px}.columns::before{display:table;content:""}.columns::after{display:table;clear:both;content:""}.column{float:left;padding-right:10px;padding-left:10px}.one-third{width:33.333333%}.two-thirds{width:66.666667%}.one-fourth{width:25%}.one-half{width:50%}.three-fourths{width:75%}.one-fifth{width:20%}.four-fifths{width:80%}.centered{display:block;float:none;margin-right:auto;margin-left:auto}.col-1{width:8.3333333333%}.col-2{width:16.6666666667%}.col-3{width:25%}.col-4{width:33.3333333333%}.col-5{width:41.6666666667%}.col-6{width:50%}.col-7{width:58.3333333333%}.col-8{width:66.6666666667%}.col-9{width:75%}.col-10{width:83.3333333333%}.col-11{width:91.6666666667%}.col-12{width:100%}@media (min-width: 544px){.col-sm-1{width:8.3333333333%}.col-sm-2{width:16.6666666667%}.col-sm-3{width:25%}.col-sm-4{width:33.3333333333%}.col-sm-5{width:41.6666666667%}.col-sm-6{width:50%}.col-sm-7{width:58.3333333333%}.col-sm-8{width:66.6666666667%}.col-sm-9{width:75%}.col-sm-10{width:83.3333333333%}.col-sm-11{width:91.6666666667%}.col-sm-12{width:100%}}@media (min-width: 768px){.col-md-1{width:8.3333333333%}.col-md-2{width:16.6666666667%}.col-md-3{width:25%}.col-md-4{width:33.3333333333%}.col-md-5{width:41.6666666667%}.col-md-6{width:50%}.col-md-7{width:58.3333333333%}.col-md-8{width:66.6666666667%}.col-md-9{width:75%}.col-md-10{width:83.3333333333%}.col-md-11{width:91.6666666667%}.col-md-12{width:100%}}@media (min-width: 1012px){.col-lg-1{width:8.3333333333%}.col-lg-2{width:16.6666666667%}.col-lg-3{width:25%}.col-lg-4{width:33.3333333333%}.col-lg-5{width:41.6666666667%}.col-lg-6{width:50%}.col-lg-7{width:58.3333333333%}.col-lg-8{width:66.6666666667%}.col-lg-9{width:75%}.col-lg-10{width:83.3333333333%}.col-lg-11{width:91.6666666667%}.col-lg-12{width:100%}}@media (min-width: 1280px){.col-xl-1{width:8.3333333333%}.col-xl-2{width:16.6666666667%}.col-xl-3{width:25%}.col-xl-4{width:33.3333333333%}.col-xl-5{width:41.6666666667%}.col-xl-6{width:50%}.col-xl-7{width:58.3333333333%}.col-xl-8{width:66.6666666667%}.col-xl-9{width:75%}.col-xl-10{width:83.3333333333%}.col-xl-11{width:91.6666666667%}.col-xl-12{width:100%}}.gutter{margin-right:-16px;margin-left:-16px}.gutter>[class*="col-"]{padding-right:16px !important;padding-left:16px !important}.gutter-condensed{margin-right:-8px;margin-left:-8px}.gutter-condensed>[class*="col-"]{padding-right:8px !important;padding-left:8px !important}.gutter-spacious{margin-right:-24px;margin-left:-24px}.gutter-spacious>[class*="col-"]{padding-right:24px !important;padding-left:24px !important}@media (min-width: 544px){.gutter-sm{margin-right:-16px;margin-left:-16px}.gutter-sm>[class*="col-"]{padding-right:16px !important;padding-left:16px !important}.gutter-sm-condensed{margin-right:-8px;margin-left:-8px}.gutter-sm-condensed>[class*="col-"]{padding-right:8px !important;padding-left:8px !important}.gutter-sm-spacious{margin-right:-24px;margin-left:-24px}.gutter-sm-spacious>[class*="col-"]{padding-right:24px !important;padding-left:24px !important}}@media (min-width: 
768px){.gutter-md{margin-right:-16px;margin-left:-16px}.gutter-md>[class*="col-"]{padding-right:16px !important;padding-left:16px !important}.gutter-md-condensed{margin-right:-8px;margin-left:-8px}.gutter-md-condensed>[class*="col-"]{padding-right:8px !important;padding-left:8px !important}.gutter-md-spacious{margin-right:-24px;margin-left:-24px}.gutter-md-spacious>[class*="col-"]{padding-right:24px !important;padding-left:24px !important}}@media (min-width: 1012px){.gutter-lg{margin-right:-16px;margin-left:-16px}.gutter-lg>[class*="col-"]{padding-right:16px !important;padding-left:16px !important}.gutter-lg-condensed{margin-right:-8px;margin-left:-8px}.gutter-lg-condensed>[class*="col-"]{padding-right:8px !important;padding-left:8px !important}.gutter-lg-spacious{margin-right:-24px;margin-left:-24px}.gutter-lg-spacious>[class*="col-"]{padding-right:24px !important;padding-left:24px !important}}@media (min-width: 1280px){.gutter-xl{margin-right:-16px;margin-left:-16px}.gutter-xl>[class*="col-"]{padding-right:16px !important;padding-left:16px !important}.gutter-xl-condensed{margin-right:-8px;margin-left:-8px}.gutter-xl-condensed>[class*="col-"]{padding-right:8px !important;padding-left:8px !important}.gutter-xl-spacious{margin-right:-24px;margin-left:-24px}.gutter-xl-spacious>[class*="col-"]{padding-right:24px !important;padding-left:24px !important}}.offset-1{margin-left:8.3333333333% !important}.offset-2{margin-left:16.6666666667% !important}.offset-3{margin-left:25% !important}.offset-4{margin-left:33.3333333333% !important}.offset-5{margin-left:41.6666666667% !important}.offset-6{margin-left:50% !important}.offset-7{margin-left:58.3333333333% !important}.offset-8{margin-left:66.6666666667% !important}.offset-9{margin-left:75% !important}.offset-10{margin-left:83.3333333333% !important}.offset-11{margin-left:91.6666666667% !important}@media (min-width: 544px){.offset-sm-1{margin-left:8.3333333333% !important}.offset-sm-2{margin-left:16.6666666667% !important}.offset-sm-3{margin-left:25% !important}.offset-sm-4{margin-left:33.3333333333% !important}.offset-sm-5{margin-left:41.6666666667% !important}.offset-sm-6{margin-left:50% !important}.offset-sm-7{margin-left:58.3333333333% !important}.offset-sm-8{margin-left:66.6666666667% !important}.offset-sm-9{margin-left:75% !important}.offset-sm-10{margin-left:83.3333333333% !important}.offset-sm-11{margin-left:91.6666666667% !important}}@media (min-width: 768px){.offset-md-1{margin-left:8.3333333333% !important}.offset-md-2{margin-left:16.6666666667% !important}.offset-md-3{margin-left:25% !important}.offset-md-4{margin-left:33.3333333333% !important}.offset-md-5{margin-left:41.6666666667% !important}.offset-md-6{margin-left:50% !important}.offset-md-7{margin-left:58.3333333333% !important}.offset-md-8{margin-left:66.6666666667% !important}.offset-md-9{margin-left:75% !important}.offset-md-10{margin-left:83.3333333333% !important}.offset-md-11{margin-left:91.6666666667% !important}}@media (min-width: 1012px){.offset-lg-1{margin-left:8.3333333333% !important}.offset-lg-2{margin-left:16.6666666667% !important}.offset-lg-3{margin-left:25% !important}.offset-lg-4{margin-left:33.3333333333% !important}.offset-lg-5{margin-left:41.6666666667% !important}.offset-lg-6{margin-left:50% !important}.offset-lg-7{margin-left:58.3333333333% !important}.offset-lg-8{margin-left:66.6666666667% !important}.offset-lg-9{margin-left:75% !important}.offset-lg-10{margin-left:83.3333333333% !important}.offset-lg-11{margin-left:91.6666666667% !important}}@media 
(min-width: 1280px){.offset-xl-1{margin-left:8.3333333333% !important}.offset-xl-2{margin-left:16.6666666667% !important}.offset-xl-3{margin-left:25% !important}.offset-xl-4{margin-left:33.3333333333% !important}.offset-xl-5{margin-left:41.6666666667% !important}.offset-xl-6{margin-left:50% !important}.offset-xl-7{margin-left:58.3333333333% !important}.offset-xl-8{margin-left:66.6666666667% !important}.offset-xl-9{margin-left:75% !important}.offset-xl-10{margin-left:83.3333333333% !important}.offset-xl-11{margin-left:91.6666666667% !important}}.markdown-body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Helvetica,Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol";font-size:16px;line-height:1.5;word-wrap:break-word}.markdown-body::before{display:table;content:""}.markdown-body::after{display:table;clear:both;content:""}.markdown-body>*:first-child{margin-top:0 !important}.markdown-body>*:last-child{margin-bottom:0 !important}.markdown-body a:not([href]){color:inherit;text-decoration:none}.markdown-body .absent{color:#cb2431}.markdown-body .anchor{float:left;padding-right:4px;margin-left:-20px;line-height:1}.markdown-body .anchor:focus{outline:none}.markdown-body p,.markdown-body blockquote,.markdown-body ul,.markdown-body ol,.markdown-body dl,.markdown-body table,.markdown-body pre{margin-top:0;margin-bottom:16px}.markdown-body hr{height:.25em;padding:0;margin:24px 0;background-color:#e1e4e8;border:0}.markdown-body blockquote{padding:0 1em;color:#6a737d;border-left:0.25em solid #dfe2e5}.markdown-body blockquote>:first-child{margin-top:0}.markdown-body blockquote>:last-child{margin-bottom:0}.markdown-body kbd{display:inline-block;padding:3px 5px;font-size:11px;line-height:10px;color:#444d56;vertical-align:middle;background-color:#fafbfc;border:solid 1px #c6cbd1;border-bottom-color:#959da5;border-radius:3px;box-shadow:inset 0 -1px 0 #959da5}.markdown-body h1,.markdown-body h2,.markdown-body h3,.markdown-body h4,.markdown-body h5,.markdown-body h6{margin-top:24px;margin-bottom:16px;font-weight:600;line-height:1.25}.markdown-body h1 .octicon-link,.markdown-body h2 .octicon-link,.markdown-body h3 .octicon-link,.markdown-body h4 .octicon-link,.markdown-body h5 .octicon-link,.markdown-body h6 .octicon-link{color:#1b1f23;vertical-align:middle;visibility:hidden}.markdown-body h1:hover .anchor,.markdown-body h2:hover .anchor,.markdown-body h3:hover .anchor,.markdown-body h4:hover .anchor,.markdown-body h5:hover .anchor,.markdown-body h6:hover .anchor{text-decoration:none}.markdown-body h1:hover .anchor .octicon-link,.markdown-body h2:hover .anchor .octicon-link,.markdown-body h3:hover .anchor .octicon-link,.markdown-body h4:hover .anchor .octicon-link,.markdown-body h5:hover .anchor .octicon-link,.markdown-body h6:hover .anchor .octicon-link{visibility:visible}.markdown-body h1 tt,.markdown-body h1 code,.markdown-body h2 tt,.markdown-body h2 code,.markdown-body h3 tt,.markdown-body h3 code,.markdown-body h4 tt,.markdown-body h4 code,.markdown-body h5 tt,.markdown-body h5 code,.markdown-body h6 tt,.markdown-body h6 code{font-size:inherit}.markdown-body h1{padding-bottom:0.3em;font-size:2em;border-bottom:1px solid #eaecef}.markdown-body h2{padding-bottom:0.3em;font-size:1.5em;border-bottom:1px solid #eaecef}.markdown-body h3{font-size:1.25em}.markdown-body h4{font-size:1em}.markdown-body h5{font-size:0.875em}.markdown-body h6{font-size:0.85em;color:#6a737d}.markdown-body ul,.markdown-body ol{padding-left:2em}.markdown-body ul.no-list,.markdown-body 
ol.no-list{padding:0;list-style-type:none}.markdown-body ul ul,.markdown-body ul ol,.markdown-body ol ol,.markdown-body ol ul{margin-top:0;margin-bottom:0}.markdown-body li{word-wrap:break-all}.markdown-body li>p{margin-top:16px}.markdown-body li+li{margin-top:.25em}.markdown-body dl{padding:0}.markdown-body dl dt{padding:0;margin-top:16px;font-size:1em;font-style:italic;font-weight:600}.markdown-body dl dd{padding:0 16px;margin-bottom:16px}.markdown-body table{display:block;width:100%;overflow:auto}.markdown-body table th{font-weight:600}.markdown-body table th,.markdown-body table td{padding:6px 13px;border:1px solid #dfe2e5}.markdown-body table tr{background-color:#fff;border-top:1px solid #c6cbd1}.markdown-body table tr:nth-child(2n){background-color:#f6f8fa}.markdown-body table img{background-color:transparent}.markdown-body img{max-width:100%;box-sizing:content-box;background-color:#fff}.markdown-body img[align=right]{padding-left:20px}.markdown-body img[align=left]{padding-right:20px}.markdown-body .emoji{max-width:none;vertical-align:text-top;background-color:transparent}.markdown-body span.frame{display:block;overflow:hidden}.markdown-body span.frame>span{display:block;float:left;width:auto;padding:7px;margin:13px 0 0;overflow:hidden;border:1px solid #dfe2e5}.markdown-body span.frame span img{display:block;float:left}.markdown-body span.frame span span{display:block;padding:5px 0 0;clear:both;color:#24292e}.markdown-body span.align-center{display:block;overflow:hidden;clear:both}.markdown-body span.align-center>span{display:block;margin:13px auto 0;overflow:hidden;text-align:center}.markdown-body span.align-center span img{margin:0 auto;text-align:center}.markdown-body span.align-right{display:block;overflow:hidden;clear:both}.markdown-body span.align-right>span{display:block;margin:13px 0 0;overflow:hidden;text-align:right}.markdown-body span.align-right span img{margin:0;text-align:right}.markdown-body span.float-left{display:block;float:left;margin-right:13px;overflow:hidden}.markdown-body span.float-left span{margin:13px 0 0}.markdown-body span.float-right{display:block;float:right;margin-left:13px;overflow:hidden}.markdown-body span.float-right>span{display:block;margin:13px auto 0;overflow:hidden;text-align:right}.markdown-body code,.markdown-body tt{padding:0.2em 0.4em;margin:0;font-size:85%;background-color:rgba(27,31,35,0.05);border-radius:3px}.markdown-body code br,.markdown-body tt br{display:none}.markdown-body del code{text-decoration:inherit}.markdown-body pre{word-wrap:normal}.markdown-body pre>code{padding:0;margin:0;font-size:100%;word-break:normal;white-space:pre;background:transparent;border:0}.markdown-body .highlight{margin-bottom:16px}.markdown-body .highlight pre{margin-bottom:0;word-break:normal}.markdown-body .highlight pre,.markdown-body pre{padding:16px;overflow:auto;font-size:85%;line-height:1.45;background-color:#f6f8fa;border-radius:3px}.markdown-body pre code,.markdown-body pre tt{display:inline;max-width:auto;padding:0;margin:0;overflow:visible;line-height:inherit;word-wrap:normal;background-color:transparent;border:0}.markdown-body .csv-data td,.markdown-body .csv-data th{padding:5px;overflow:hidden;font-size:12px;line-height:1;text-align:left;white-space:nowrap}.markdown-body .csv-data .blob-num{padding:10px 8px 9px;text-align:right;background:#fff;border:0}.markdown-body .csv-data tr{border-top:0}.markdown-body .csv-data th{font-weight:600;background:#f6f8fa;border-top:0}.highlight table td{padding:5px}.highlight table pre{margin:0}.highlight 
.cm{color:#999988;font-style:italic}.highlight .cp{color:#999999;font-weight:bold}.highlight .c1{color:#999988;font-style:italic}.highlight .cs{color:#999999;font-weight:bold;font-style:italic}.highlight .c,.highlight .cd{color:#999988;font-style:italic}.highlight .err{color:#a61717;background-color:#e3d2d2}.highlight .gd{color:#000000;background-color:#ffdddd}.highlight .ge{color:#000000;font-style:italic}.highlight .gr{color:#aa0000}.highlight .gh{color:#999999}.highlight .gi{color:#000000;background-color:#ddffdd}.highlight .go{color:#888888}.highlight .gp{color:#555555}.highlight .gs{font-weight:bold}.highlight .gu{color:#aaaaaa}.highlight .gt{color:#aa0000}.highlight .kc{color:#000000;font-weight:bold}.highlight .kd{color:#000000;font-weight:bold}.highlight .kn{color:#000000;font-weight:bold}.highlight .kp{color:#000000;font-weight:bold}.highlight .kr{color:#000000;font-weight:bold}.highlight .kt{color:#445588;font-weight:bold}.highlight .k,.highlight .kv{color:#000000;font-weight:bold}.highlight .mf{color:#009999}.highlight .mh{color:#009999}.highlight .il{color:#009999}.highlight .mi{color:#009999}.highlight .mo{color:#009999}.highlight .m,.highlight .mb,.highlight .mx{color:#009999}.highlight .sb{color:#d14}.highlight .sc{color:#d14}.highlight .sd{color:#d14}.highlight .s2{color:#d14}.highlight .se{color:#d14}.highlight .sh{color:#d14}.highlight .si{color:#d14}.highlight .sx{color:#d14}.highlight .sr{color:#009926}.highlight .s1{color:#d14}.highlight .ss{color:#990073}.highlight .s{color:#d14}.highlight .na{color:#008080}.highlight .bp{color:#999999}.highlight .nb{color:#0086B3}.highlight .nc{color:#445588;font-weight:bold}.highlight .no{color:#008080}.highlight .nd{color:#3c5d5d;font-weight:bold}.highlight .ni{color:#800080}.highlight .ne{color:#990000;font-weight:bold}.highlight .nf{color:#990000;font-weight:bold}.highlight .nl{color:#990000;font-weight:bold}.highlight .nn{color:#555555}.highlight .nt{color:#000080}.highlight .vc{color:#008080}.highlight .vg{color:#008080}.highlight .vi{color:#008080}.highlight .nv{color:#008080}.highlight .ow{color:#000000;font-weight:bold}.highlight .o{color:#000000;font-weight:bold}.highlight .w{color:#bbbbbb}.highlight{background-color:#f8f8f8} diff --git a/assets/hub/CODE_OF_CONDUCT.ipynb b/assets/hub/CODE_OF_CONDUCT.ipynb new file mode 100644 index 000000000000..363fcab7ed6e --- /dev/null +++ b/assets/hub/CODE_OF_CONDUCT.ipynb @@ -0,0 +1,6 @@ +{ + "cells": [], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/CONTRIBUTING.ipynb b/assets/hub/CONTRIBUTING.ipynb new file mode 100644 index 000000000000..363fcab7ed6e --- /dev/null +++ b/assets/hub/CONTRIBUTING.ipynb @@ -0,0 +1,6 @@ +{ + "cells": [], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/datvuthanh_hybridnets.ipynb b/assets/hub/datvuthanh_hybridnets.ipynb new file mode 100644 index 000000000000..725631351953 --- /dev/null +++ b/assets/hub/datvuthanh_hybridnets.ipynb @@ -0,0 +1,148 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "8b2b9fde", + "metadata": {}, + "source": [ + "### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# HybridNets\n", + "\n", + "*Author: Dat Vu Thanh*\n", + "\n", + "**HybridNets - End2End 
Perception Network**\n", + "\n", + "## Before You Start\n", + "\n", + "Start from a **Python>=3.7** environment with **PyTorch>=1.10** installed. To install PyTorch see [https://pytorch.org/get-started/locally/](https://pytorch.org/get-started/locally/). To install HybridNets dependencies:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "201537e7", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "pip install -qr https://raw.githubusercontent.com/datvuthanh/HybridNets/main/requirements.txt # install dependencies" + ] + }, + { + "cell_type": "markdown", + "id": "904debad", + "metadata": {}, + "source": [ + "## Model Description\n", + " \n", + " \n", + "\n", + "HybridNets is an end2end perception network for multi-tasks. Our work focused on traffic object detection, drivable area segmentation and lane detection. HybridNets can run real-time on embedded systems, and obtains SOTA Object Detection, Lane Detection on BDD100K Dataset.\n", + "\n", + "### Results\n", + "\n", + "### Traffic Object Detection\n", + "\n", + "| Model | Recall (%) | mAP@0.5 (%) |\n", + "|:------------------:|:------------:|:---------------:|\n", + "| `MultiNet` | 81.3 | 60.2 |\n", + "| `DLT-Net` | 89.4 | 68.4 |\n", + "| `Faster R-CNN` | 77.2 | 55.6 |\n", + "| `YOLOv5s` | 86.8 | 77.2 |\n", + "| `YOLOP` | 89.2 | 76.5 |\n", + "| **`HybridNets`** | **92.8** | **77.3** |\n", + "\n", + "\n", + " \n", + "### Drivable Area Segmentation\n", + "\n", + "| Model | Drivable mIoU (%) |\n", + "|:----------------:|:-----------------:|\n", + "| `MultiNet` | 71.6 |\n", + "| `DLT-Net` | 71.3 |\n", + "| `PSPNet` | 89.6 |\n", + "| `YOLOP` | 91.5 |\n", + "| **`HybridNets`** | **90.5** |\n", + "\n", + "\n", + " \n", + "### Lane Line Detection\n", + "\n", + "| Model | Accuracy (%) | Lane Line IoU (%) |\n", + "|:----------------:|:------------:|:-----------------:|\n", + "| `Enet` | 34.12 | 14.64 |\n", + "| `SCNN` | 35.79 | 15.84 |\n", + "| `Enet-SAD` | 36.56 | 16.02 |\n", + "| `YOLOP` | 70.5 | 26.2 |\n", + "| **`HybridNets`** | **85.4** | **31.6** |\n", + "\n", + "\n", + " \n", + "\n", + " \n", + " \n", + "### Load From PyTorch Hub\n", + "\n", + "This example loads the pretrained **HybridNets** model and passes an image for inference." 
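For readers who want to go beyond the random-tensor example in the cell below, here is a minimal, hedged sketch of running the same hub model on a real photograph. The 640x384 resolution mirrors the notebook's example tensor, while the file name example.jpg, the plain ToTensor conversion, and the absence of extra normalization are illustrative assumptions rather than part of the HybridNets notebook.

import torch
from PIL import Image
from torchvision import transforms

# Load the pretrained model from PyTorch Hub (same call as in the notebook cell).
model = torch.hub.load('datvuthanh/hybridnets', 'hybridnets', pretrained=True)
model.eval()

# Assumed preprocessing: resize to the (640, 384) spatial size used by the
# notebook's example tensor and convert to a float tensor in [0, 1].
preprocess = transforms.Compose([
    transforms.Resize((640, 384)),
    transforms.ToTensor(),
])

# 'example.jpg' is a hypothetical local file, not shipped with the notebook.
img = preprocess(Image.open('example.jpg').convert('RGB')).unsqueeze(0)  # shape (1, 3, 640, 384)

with torch.no_grad():
    features, regression, classification, anchors, segmentation = model(img)
print(classification.shape, segmentation.shape)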
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cf93a386", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "\n", + "# load model\n", + "model = torch.hub.load('datvuthanh/hybridnets', 'hybridnets', pretrained=True)\n", + "\n", + "#inference\n", + "img = torch.randn(1,3,640,384)\n", + "features, regression, classification, anchors, segmentation = model(img)" + ] + }, + { + "cell_type": "markdown", + "id": "72d7d3b7", + "metadata": {}, + "source": [ + "### Citation\n", + "\n", + "If you find our [paper](https://arxiv.org/abs/2203.09035) and [code](https://github.com/datvuthanh/HybridNets) useful for your research, please consider giving a star and citation:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a7454942", + "metadata": { + "attributes": { + "classes": [ + "BibTeX" + ], + "id": "" + } + }, + "outputs": [], + "source": [ + "@misc{vu2022hybridnets,\n", + " title={HybridNets: End-to-End Perception Network}, \n", + " author={Dat Vu and Bao Ngo and Hung Phan},\n", + " year={2022},\n", + " eprint={2203.09035},\n", + " archivePrefix={arXiv},\n", + " primaryClass={cs.CV}\n", + "}" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/facebookresearch_WSL-Images_resnext.ipynb b/assets/hub/facebookresearch_WSL-Images_resnext.ipynb index 223a36864282..1a3ab42a4513 100644 --- a/assets/hub/facebookresearch_WSL-Images_resnext.ipynb +++ b/assets/hub/facebookresearch_WSL-Images_resnext.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "9f147b54", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -21,6 +22,7 @@ { "cell_type": "code", "execution_count": null, + "id": "832144e5", "metadata": {}, "outputs": [], "source": [ @@ -37,6 +39,7 @@ }, { "cell_type": "markdown", + "id": "c0ed71a0", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -50,12 +53,13 @@ { "cell_type": "code", "execution_count": null, + "id": "141b0c84", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -63,6 +67,7 @@ { "cell_type": "code", "execution_count": null, + "id": "e4cc168b", "metadata": {}, "outputs": [], "source": [ @@ -86,7 +91,7 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. 
To get probabilities, you can run a softmax on it.\n", "print(torch.nn.functional.softmax(output[0], dim=0))\n" @@ -94,6 +99,7 @@ }, { "cell_type": "markdown", + "id": "aec83993", "metadata": {}, "source": [ "### Model Description\n", @@ -118,5 +124,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/facebookresearch_pytorch-gan-zoo_dcgan.ipynb b/assets/hub/facebookresearch_pytorch-gan-zoo_dcgan.ipynb index b2502de94025..c827d374ff3c 100644 --- a/assets/hub/facebookresearch_pytorch-gan-zoo_dcgan.ipynb +++ b/assets/hub/facebookresearch_pytorch-gan-zoo_dcgan.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "030ed4a8", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -21,6 +22,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a90ec4cb", "metadata": {}, "outputs": [], "source": [ @@ -32,6 +34,7 @@ }, { "cell_type": "markdown", + "id": "6c8aeda3", "metadata": {}, "source": [ "The input to the model is a noise vector of shape `(N, 120)` where `N` is the number of images to be generated.\n", @@ -42,6 +45,7 @@ { "cell_type": "code", "execution_count": null, + "id": "81875052", "metadata": {}, "outputs": [], "source": [ @@ -59,6 +63,7 @@ }, { "cell_type": "markdown", + "id": "2c872e81", "metadata": {}, "source": [ "You should see an image similar to the one on the left.\n", @@ -85,5 +90,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/facebookresearch_pytorch-gan-zoo_pgan.ipynb b/assets/hub/facebookresearch_pytorch-gan-zoo_pgan.ipynb index 46f1aa89ddc1..332473017377 100644 --- a/assets/hub/facebookresearch_pytorch-gan-zoo_pgan.ipynb +++ b/assets/hub/facebookresearch_pytorch-gan-zoo_pgan.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "1a9e07b0", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -23,6 +24,7 @@ { "cell_type": "code", "execution_count": null, + "id": "bb726e33", "metadata": {}, "outputs": [], "source": [ @@ -42,6 +44,7 @@ }, { "cell_type": "markdown", + "id": "4f98bf01", "metadata": {}, "source": [ "The input to the model is a noise vector of shape `(N, 512)` where `N` is the number of images to be generated.\n", @@ -52,6 +55,7 @@ { "cell_type": "code", "execution_count": null, + "id": "fd7cf941", "metadata": {}, "outputs": [], "source": [ @@ -70,6 +74,7 @@ }, { "cell_type": "markdown", + "id": "14711d09", "metadata": {}, "source": [ "You should see an image similar to the one on the left.\n", @@ -88,11 +93,11 @@ "\n", "### References\n", "\n", - "- [Progressive Growing of GANs for Improved Quality, Stability, and Variation](https://arxiv.org/abs/1710.10196)" + "[1] Tero Karras et al, \"Progressive Growing of GANs for Improved Quality, Stability, and Variation\" https://arxiv.org/abs/1710.10196" ] } ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/facebookresearch_pytorchvideo_resnet.ipynb b/assets/hub/facebookresearch_pytorchvideo_resnet.ipynb new file mode 100644 index 000000000000..d5feb51d6d14 --- /dev/null +++ b/assets/hub/facebookresearch_pytorchvideo_resnet.ipynb @@ -0,0 +1,283 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "6d64c3e4", + "metadata": {}, + "source": [ + "# 3D ResNet\n", + "\n", + "*Author: FAIR PyTorchVideo*\n", + "\n", + "**Resnet Style Video classification networks pretrained on the Kinetics 400 dataset**\n", + "\n", + "\n", + "### 
Example Usage\n", + "\n", + "#### Imports\n", + "\n", + "Load the model:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c175cea7", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "# Choose the `slow_r50` model \n", + "model = torch.hub.load('facebookresearch/pytorchvideo', 'slow_r50', pretrained=True)" + ] + }, + { + "cell_type": "markdown", + "id": "2766a510", + "metadata": {}, + "source": [ + "Import remaining functions:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3864b118", + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "import urllib\n", + "from pytorchvideo.data.encoded_video import EncodedVideo\n", + "\n", + "from torchvision.transforms import Compose, Lambda\n", + "from torchvision.transforms._transforms_video import (\n", + " CenterCropVideo,\n", + " NormalizeVideo,\n", + ")\n", + "from pytorchvideo.transforms import (\n", + " ApplyTransformToKey,\n", + " ShortSideScale,\n", + " UniformTemporalSubsample\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "186b433c", + "metadata": {}, + "source": [ + "#### Setup\n", + "\n", + "Set the model to eval mode and move to desired device." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "30961fb0", + "metadata": { + "attributes": { + "classes": [ + "python " + ], + "id": "" + } + }, + "outputs": [], + "source": [ + "# Set to GPU or CPU\n", + "device = \"cpu\"\n", + "model = model.eval()\n", + "model = model.to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "9635686e", + "metadata": {}, + "source": [ + "Download the id to label mapping for the Kinetics 400 dataset on which the torch hub models were trained. This will be used to get the category label names from the predicted class ids." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b91348b2", + "metadata": {}, + "outputs": [], + "source": [ + "json_url = \"https://dl.fbaipublicfiles.com/pyslowfast/dataset/class_names/kinetics_classnames.json\"\n", + "json_filename = \"kinetics_classnames.json\"\n", + "try: urllib.URLopener().retrieve(json_url, json_filename)\n", + "except: urllib.request.urlretrieve(json_url, json_filename)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c1738f26", + "metadata": {}, + "outputs": [], + "source": [ + "with open(json_filename, \"r\") as f:\n", + " kinetics_classnames = json.load(f)\n", + "\n", + "# Create an id to label name mapping\n", + "kinetics_id_to_classname = {}\n", + "for k, v in kinetics_classnames.items():\n", + " kinetics_id_to_classname[v] = str(k).replace('\"', \"\")" + ] + }, + { + "cell_type": "markdown", + "id": "20296321", + "metadata": {}, + "source": [ + "#### Define input transform" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7c2be1a5", + "metadata": {}, + "outputs": [], + "source": [ + "side_size = 256\n", + "mean = [0.45, 0.45, 0.45]\n", + "std = [0.225, 0.225, 0.225]\n", + "crop_size = 256\n", + "num_frames = 8\n", + "sampling_rate = 8\n", + "frames_per_second = 30\n", + "\n", + "# Note that this transform is specific to the slow_R50 model.\n", + "transform = ApplyTransformToKey(\n", + " key=\"video\",\n", + " transform=Compose(\n", + " [\n", + " UniformTemporalSubsample(num_frames),\n", + " Lambda(lambda x: x/255.0),\n", + " NormalizeVideo(mean, std),\n", + " ShortSideScale(\n", + " size=side_size\n", + " ),\n", + " CenterCropVideo(crop_size=(crop_size, crop_size))\n", + " ]\n", + " ),\n", + ")\n", + "\n", + "# The duration of the input clip is also specific to the model.\n", + "clip_duration = (num_frames * sampling_rate)/frames_per_second" + ] + }, + { + "cell_type": "markdown", + "id": "94f8fca0", + "metadata": {}, + "source": [ + "#### Run Inference\n", + "\n", + "Download an example video." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "db9231df", + "metadata": {}, + "outputs": [], + "source": [ + "url_link = \"https://dl.fbaipublicfiles.com/pytorchvideo/projects/archery.mp4\"\n", + "video_path = 'archery.mp4'\n", + "try: urllib.URLopener().retrieve(url_link, video_path)\n", + "except: urllib.request.urlretrieve(url_link, video_path)" + ] + }, + { + "cell_type": "markdown", + "id": "3f2a6373", + "metadata": {}, + "source": [ + "Load the video and transform it to the input format required by the model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "43252e1e", + "metadata": {}, + "outputs": [], + "source": [ + "# Select the duration of the clip to load by specifying the start and end duration\n", + "# The start_sec should correspond to where the action occurs in the video\n", + "start_sec = 0\n", + "end_sec = start_sec + clip_duration\n", + "\n", + "# Initialize an EncodedVideo helper class and load the video\n", + "video = EncodedVideo.from_path(video_path)\n", + "\n", + "# Load the desired clip\n", + "video_data = video.get_clip(start_sec=start_sec, end_sec=end_sec)\n", + "\n", + "# Apply a transform to normalize the video input\n", + "video_data = transform(video_data)\n", + "\n", + "# Move the inputs to the desired device\n", + "inputs = video_data[\"video\"]\n", + "inputs = inputs.to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "aa11e8e4", + "metadata": {}, + "source": [ + "#### Get Predictions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "36faf70e", + "metadata": {}, + "outputs": [], + "source": [ + "# Pass the input clip through the model\n", + "preds = model(inputs[None, ...])\n", + "\n", + "# Get the predicted classes\n", + "post_act = torch.nn.Softmax(dim=1)\n", + "preds = post_act(preds)\n", + "pred_classes = preds.topk(k=5).indices[0]\n", + "\n", + "# Map the predicted classes to the label names\n", + "pred_class_names = [kinetics_id_to_classname[int(i)] for i in pred_classes]\n", + "print(\"Top 5 predicted labels: %s\" % \", \".join(pred_class_names))" + ] + }, + { + "cell_type": "markdown", + "id": "654ea545", + "metadata": {}, + "source": [ + "### Model Description\n", + "The model architecture is based on [1] with pretrained weights using the 8x8 setting\n", + "on the Kinetics dataset. 
\n", + "\n", + "| arch | depth | frame length x sample rate | top 1 | top 5 | Flops (G) | Params (M) |\n", + "| --------------- | ----------- | ----------- | ----------- | ----------- | ----------- | ----------- |\n", + "| Slow | R50 | 8x8 | 74.58 | 91.63 | 54.52 | 32.45 |\n", + "\n", + "\n", + "### References\n", + "[1] Christoph Feichtenhofer et al, \"SlowFast Networks for Video Recognition\"\n", + "https://arxiv.org/pdf/1812.03982.pdf" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/facebookresearch_pytorchvideo_slowfast.ipynb b/assets/hub/facebookresearch_pytorchvideo_slowfast.ipynb new file mode 100644 index 000000000000..dc63f48ca96e --- /dev/null +++ b/assets/hub/facebookresearch_pytorchvideo_slowfast.ipynb @@ -0,0 +1,308 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "fa481569", + "metadata": {}, + "source": [ + "# SlowFast\n", + "\n", + "*Author: FAIR PyTorchVideo*\n", + "\n", + "**SlowFast networks pretrained on the Kinetics 400 dataset**\n", + "\n", + "\n", + "### Example Usage\n", + "\n", + "#### Imports\n", + "\n", + "Load the model:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99e832d6", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "# Choose the `slowfast_r50` model \n", + "model = torch.hub.load('facebookresearch/pytorchvideo', 'slowfast_r50', pretrained=True)" + ] + }, + { + "cell_type": "markdown", + "id": "69054695", + "metadata": {}, + "source": [ + "Import remaining functions:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f5661afb", + "metadata": {}, + "outputs": [], + "source": [ + "from typing import Dict\n", + "import json\n", + "import urllib\n", + "from torchvision.transforms import Compose, Lambda\n", + "from torchvision.transforms._transforms_video import (\n", + " CenterCropVideo,\n", + " NormalizeVideo,\n", + ")\n", + "from pytorchvideo.data.encoded_video import EncodedVideo\n", + "from pytorchvideo.transforms import (\n", + " ApplyTransformToKey,\n", + " ShortSideScale,\n", + " UniformTemporalSubsample,\n", + " UniformCropVideo\n", + ") " + ] + }, + { + "cell_type": "markdown", + "id": "31d290f2", + "metadata": {}, + "source": [ + "#### Setup\n", + "\n", + "Set the model to eval mode and move to desired device." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "344f86e1", + "metadata": { + "attributes": { + "classes": [ + "python " + ], + "id": "" + } + }, + "outputs": [], + "source": [ + "# Set to GPU or CPU\n", + "device = \"cpu\"\n", + "model = model.eval()\n", + "model = model.to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "b1afe37e", + "metadata": {}, + "source": [ + "Download the id to label mapping for the Kinetics 400 dataset on which the torch hub models were trained. This will be used to get the category label names from the predicted class ids." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "474dcf8d", + "metadata": {}, + "outputs": [], + "source": [ + "json_url = \"https://dl.fbaipublicfiles.com/pyslowfast/dataset/class_names/kinetics_classnames.json\"\n", + "json_filename = \"kinetics_classnames.json\"\n", + "try: urllib.URLopener().retrieve(json_url, json_filename)\n", + "except: urllib.request.urlretrieve(json_url, json_filename)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3fbdadaf", + "metadata": {}, + "outputs": [], + "source": [ + "with open(json_filename, \"r\") as f:\n", + " kinetics_classnames = json.load(f)\n", + "\n", + "# Create an id to label name mapping\n", + "kinetics_id_to_classname = {}\n", + "for k, v in kinetics_classnames.items():\n", + " kinetics_id_to_classname[v] = str(k).replace('\"', \"\")" + ] + }, + { + "cell_type": "markdown", + "id": "c04836fb", + "metadata": {}, + "source": [ + "#### Define input transform" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c242c0c0", + "metadata": {}, + "outputs": [], + "source": [ + "side_size = 256\n", + "mean = [0.45, 0.45, 0.45]\n", + "std = [0.225, 0.225, 0.225]\n", + "crop_size = 256\n", + "num_frames = 32\n", + "sampling_rate = 2\n", + "frames_per_second = 30\n", + "slowfast_alpha = 4\n", + "num_clips = 10\n", + "num_crops = 3\n", + "\n", + "class PackPathway(torch.nn.Module):\n", + " \"\"\"\n", + " Transform for converting video frames as a list of tensors. \n", + " \"\"\"\n", + " def __init__(self):\n", + " super().__init__()\n", + " \n", + " def forward(self, frames: torch.Tensor):\n", + " fast_pathway = frames\n", + " # Perform temporal sampling from the fast pathway.\n", + " slow_pathway = torch.index_select(\n", + " frames,\n", + " 1,\n", + " torch.linspace(\n", + " 0, frames.shape[1] - 1, frames.shape[1] // slowfast_alpha\n", + " ).long(),\n", + " )\n", + " frame_list = [slow_pathway, fast_pathway]\n", + " return frame_list\n", + "\n", + "transform = ApplyTransformToKey(\n", + " key=\"video\",\n", + " transform=Compose(\n", + " [\n", + " UniformTemporalSubsample(num_frames),\n", + " Lambda(lambda x: x/255.0),\n", + " NormalizeVideo(mean, std),\n", + " ShortSideScale(\n", + " size=side_size\n", + " ),\n", + " CenterCropVideo(crop_size),\n", + " PackPathway()\n", + " ]\n", + " ),\n", + ")\n", + "\n", + "# The duration of the input clip is also specific to the model.\n", + "clip_duration = (num_frames * sampling_rate)/frames_per_second" + ] + }, + { + "cell_type": "markdown", + "id": "7318ff38", + "metadata": {}, + "source": [ + "#### Run Inference\n", + "\n", + "Download an example video." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "848e5a56", + "metadata": {}, + "outputs": [], + "source": [ + "url_link = \"https://dl.fbaipublicfiles.com/pytorchvideo/projects/archery.mp4\"\n", + "video_path = 'archery.mp4'\n", + "try: urllib.URLopener().retrieve(url_link, video_path)\n", + "except: urllib.request.urlretrieve(url_link, video_path)" + ] + }, + { + "cell_type": "markdown", + "id": "326b7a94", + "metadata": {}, + "source": [ + "Load the video and transform it to the input format required by the model." 
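Because of the `PackPathway` transform defined above, the transformed clip is a list of two tensors rather than a single one: a sparsely sampled slow pathway and the full-rate fast pathway. Once the next cell has produced `video_data`, a quick shape check (a sketch only; the sizes follow from the `num_frames=32`, `slowfast_alpha=4` and 256-pixel crop settings above) makes the expected input format explicit:

```python
# Sketch: inspect the two-pathway clip produced by the transform above
# (run after the next cell has created `video_data`).
slow_pathway, fast_pathway = video_data["video"]
print(slow_pathway.shape)  # expected: torch.Size([3, 8, 256, 256])   -> (C, T // alpha, H, W)
print(fast_pathway.shape)  # expected: torch.Size([3, 32, 256, 256])  -> (C, T, H, W)
```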
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b09c420b", + "metadata": {}, + "outputs": [], + "source": [ + "# Select the duration of the clip to load by specifying the start and end duration\n", + "# The start_sec should correspond to where the action occurs in the video\n", + "start_sec = 0\n", + "end_sec = start_sec + clip_duration\n", + "\n", + "# Initialize an EncodedVideo helper class and load the video\n", + "video = EncodedVideo.from_path(video_path)\n", + "\n", + "# Load the desired clip\n", + "video_data = video.get_clip(start_sec=start_sec, end_sec=end_sec)\n", + "\n", + "# Apply a transform to normalize the video input\n", + "video_data = transform(video_data)\n", + "\n", + "# Move the inputs to the desired device\n", + "inputs = video_data[\"video\"]\n", + "inputs = [i.to(device)[None, ...] for i in inputs]" + ] + }, + { + "cell_type": "markdown", + "id": "ef00f1e7", + "metadata": {}, + "source": [ + "#### Get Predictions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a36d8fe2", + "metadata": {}, + "outputs": [], + "source": [ + "# Pass the input clip through the model\n", + "preds = model(inputs)\n", + "\n", + "# Get the predicted classes\n", + "post_act = torch.nn.Softmax(dim=1)\n", + "preds = post_act(preds)\n", + "pred_classes = preds.topk(k=5).indices[0]\n", + "\n", + "# Map the predicted classes to the label names\n", + "pred_class_names = [kinetics_id_to_classname[int(i)] for i in pred_classes]\n", + "print(\"Top 5 predicted labels: %s\" % \", \".join(pred_class_names))" + ] + }, + { + "cell_type": "markdown", + "id": "cf44a366", + "metadata": {}, + "source": [ + "### Model Description\n", + "SlowFast model architectures are based on [1] with pretrained weights using the 8x8 setting\n", + "on the Kinetics dataset. 
\n", + "\n", + "| arch | depth | frame length x sample rate | top 1 | top 5 | Flops (G) | Params (M) |\n", + "| --------------- | ----------- | ----------- | ----------- | ----------- | ----------- | ----------- | ----------- |\n", + "| SlowFast | R50 | 8x8 | 76.94 | 92.69 | 65.71 | 34.57 |\n", + "| SlowFast | R101 | 8x8 | 77.90 | 93.27 | 127.20 | 62.83 |\n", + "\n", + "\n", + "### References\n", + "[1] Christoph Feichtenhofer et al, \"SlowFast Networks for Video Recognition\"\n", + "https://arxiv.org/pdf/1812.03982.pdf" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/facebookresearch_pytorchvideo_x3d.ipynb b/assets/hub/facebookresearch_pytorchvideo_x3d.ipynb new file mode 100644 index 000000000000..bb9f6fd9ec1b --- /dev/null +++ b/assets/hub/facebookresearch_pytorchvideo_x3d.ipynb @@ -0,0 +1,297 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "3e6725de", + "metadata": {}, + "source": [ + "# X3D\n", + "\n", + "*Author: FAIR PyTorchVideo*\n", + "\n", + "**X3D networks pretrained on the Kinetics 400 dataset**\n", + "\n", + "\n", + "### Example Usage\n", + "\n", + "#### Imports\n", + "\n", + "Load the model:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "de0c7397", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "# Choose the `x3d_s` model\n", + "model_name = 'x3d_s'\n", + "model = torch.hub.load('facebookresearch/pytorchvideo', model_name, pretrained=True)" + ] + }, + { + "cell_type": "markdown", + "id": "acbddc1d", + "metadata": {}, + "source": [ + "Import remaining functions:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e307e30a", + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "import urllib\n", + "from pytorchvideo.data.encoded_video import EncodedVideo\n", + "\n", + "from torchvision.transforms import Compose, Lambda\n", + "from torchvision.transforms._transforms_video import (\n", + " CenterCropVideo,\n", + " NormalizeVideo,\n", + ")\n", + "from pytorchvideo.transforms import (\n", + " ApplyTransformToKey,\n", + " ShortSideScale,\n", + " UniformTemporalSubsample\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "66d03cd1", + "metadata": {}, + "source": [ + "#### Setup\n", + "\n", + "Set the model to eval mode and move to desired device." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4cb5446b", + "metadata": {}, + "outputs": [], + "source": [ + "# Set to GPU or CPU\n", + "device = \"cpu\"\n", + "model = model.eval()\n", + "model = model.to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "3376fb4e", + "metadata": {}, + "source": [ + "Download the id to label mapping for the Kinetics 400 dataset on which the torch hub models were trained. This will be used to get the category label names from the predicted class ids." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5e149660", + "metadata": {}, + "outputs": [], + "source": [ + "json_url = \"https://dl.fbaipublicfiles.com/pyslowfast/dataset/class_names/kinetics_classnames.json\"\n", + "json_filename = \"kinetics_classnames.json\"\n", + "try: urllib.URLopener().retrieve(json_url, json_filename)\n", + "except: urllib.request.urlretrieve(json_url, json_filename)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "365b8fc4", + "metadata": {}, + "outputs": [], + "source": [ + "with open(json_filename, \"r\") as f:\n", + " kinetics_classnames = json.load(f)\n", + "\n", + "# Create an id to label name mapping\n", + "kinetics_id_to_classname = {}\n", + "for k, v in kinetics_classnames.items():\n", + " kinetics_id_to_classname[v] = str(k).replace('\"', \"\")" + ] + }, + { + "cell_type": "markdown", + "id": "2287db8a", + "metadata": {}, + "source": [ + "#### Define input transform" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4c0fcea9", + "metadata": {}, + "outputs": [], + "source": [ + "mean = [0.45, 0.45, 0.45]\n", + "std = [0.225, 0.225, 0.225]\n", + "frames_per_second = 30\n", + "model_transform_params = {\n", + " \"x3d_xs\": {\n", + " \"side_size\": 182,\n", + " \"crop_size\": 182,\n", + " \"num_frames\": 4,\n", + " \"sampling_rate\": 12,\n", + " },\n", + " \"x3d_s\": {\n", + " \"side_size\": 182,\n", + " \"crop_size\": 182,\n", + " \"num_frames\": 13,\n", + " \"sampling_rate\": 6,\n", + " },\n", + " \"x3d_m\": {\n", + " \"side_size\": 256,\n", + " \"crop_size\": 256,\n", + " \"num_frames\": 16,\n", + " \"sampling_rate\": 5,\n", + " }\n", + "}\n", + "\n", + "# Get transform parameters based on model\n", + "transform_params = model_transform_params[model_name]\n", + "\n", + "# Note that this transform is specific to the slow_R50 model.\n", + "transform = ApplyTransformToKey(\n", + " key=\"video\",\n", + " transform=Compose(\n", + " [\n", + " UniformTemporalSubsample(transform_params[\"num_frames\"]),\n", + " Lambda(lambda x: x/255.0),\n", + " NormalizeVideo(mean, std),\n", + " ShortSideScale(size=transform_params[\"side_size\"]),\n", + " CenterCropVideo(\n", + " crop_size=(transform_params[\"crop_size\"], transform_params[\"crop_size\"])\n", + " )\n", + " ]\n", + " ),\n", + ")\n", + "\n", + "# The duration of the input clip is also specific to the model.\n", + "clip_duration = (transform_params[\"num_frames\"] * transform_params[\"sampling_rate\"])/frames_per_second" + ] + }, + { + "cell_type": "markdown", + "id": "fee4a18a", + "metadata": {}, + "source": [ + "#### Run Inference\n", + "\n", + "Download an example video." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3942a869", + "metadata": {}, + "outputs": [], + "source": [ + "url_link = \"https://dl.fbaipublicfiles.com/pytorchvideo/projects/archery.mp4\"\n", + "video_path = 'archery.mp4'\n", + "try: urllib.URLopener().retrieve(url_link, video_path)\n", + "except: urllib.request.urlretrieve(url_link, video_path)" + ] + }, + { + "cell_type": "markdown", + "id": "06e1edb8", + "metadata": {}, + "source": [ + "Load the video and transform it to the input format required by the model." 
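Unlike SlowFast, X3D consumes a single clip tensor. With the `x3d_s` parameters chosen above, the clip duration works out to `(13 * 6) / 30 = 2.6` seconds, and the transformed clip should be a `(C, T, H, W)` tensor of size `3 x 13 x 182 x 182`. A small sanity-check sketch, to be run after the next cell has created `video_data`:

```python
# Sketch: sanity-check the x3d_s clip produced by the next cell.
print(clip_duration)              # expected: 2.6 seconds, i.e. (13 * 6) / 30
print(video_data["video"].shape)  # expected: torch.Size([3, 13, 182, 182])
```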
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8962d017", + "metadata": {}, + "outputs": [], + "source": [ + "# Select the duration of the clip to load by specifying the start and end duration\n", + "# The start_sec should correspond to where the action occurs in the video\n", + "start_sec = 0\n", + "end_sec = start_sec + clip_duration\n", + "\n", + "# Initialize an EncodedVideo helper class and load the video\n", + "video = EncodedVideo.from_path(video_path)\n", + "\n", + "# Load the desired clip\n", + "video_data = video.get_clip(start_sec=start_sec, end_sec=end_sec)\n", + "\n", + "# Apply a transform to normalize the video input\n", + "video_data = transform(video_data)\n", + "\n", + "# Move the inputs to the desired device\n", + "inputs = video_data[\"video\"]\n", + "inputs = inputs.to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "5716df8a", + "metadata": {}, + "source": [ + "#### Get Predictions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "acf5911f", + "metadata": {}, + "outputs": [], + "source": [ + "# Pass the input clip through the model\n", + "preds = model(inputs[None, ...])\n", + "\n", + "# Get the predicted classes\n", + "post_act = torch.nn.Softmax(dim=1)\n", + "preds = post_act(preds)\n", + "pred_classes = preds.topk(k=5).indices[0]\n", + "\n", + "# Map the predicted classes to the label names\n", + "pred_class_names = [kinetics_id_to_classname[int(i)] for i in pred_classes]\n", + "print(\"Top 5 predicted labels: %s\" % \", \".join(pred_class_names))" + ] + }, + { + "cell_type": "markdown", + "id": "d6e404e1", + "metadata": {}, + "source": [ + "### Model Description\n", + "X3D model architectures are based on [1] pretrained on the Kinetics dataset.\n", + "\n", + "| arch | depth | frame length x sample rate | top 1 | top 5 | Flops (G) | Params (M) |\n", + "| --------------- | ----------- | ----------- | ----------- | ----------- | ----------- | ----------- | ----------- |\n", + "| X3D | XS | 4x12 | 69.12 | 88.63 | 0.91 | 3.79 |\n", + "| X3D | S | 13x6 | 73.33 | 91.27 | 2.96 | 3.79 |\n", + "| X3D | M | 16x5 | 75.94 | 92.72 | 6.72 | 3.79 |\n", + "\n", + "\n", + "### References\n", + "[1] Christoph Feichtenhofer, \"X3D: Expanding Architectures for\n", + " Efficient Video Recognition.\" https://arxiv.org/abs/2004.04730" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/facebookresearch_semi-supervised-ImageNet1K-models_resnext.ipynb b/assets/hub/facebookresearch_semi-supervised-ImageNet1K-models_resnext.ipynb index 53efcd736110..83ca76c68e69 100644 --- a/assets/hub/facebookresearch_semi-supervised-ImageNet1K-models_resnext.ipynb +++ b/assets/hub/facebookresearch_semi-supervised-ImageNet1K-models_resnext.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "a227ccfa", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -21,12 +22,13 @@ { "cell_type": "code", "execution_count": null, + "id": "1f5a1171", "metadata": {}, "outputs": [], "source": [ "import torch\n", "\n", - "# === SEMI-WEAKLY SUPERVISED MODELSP RETRAINED WITH 940 HASHTAGGED PUBLIC CONTENT === \n", + "# === SEMI-WEAKLY SUPERVISED MODELS PRETRAINED WITH 940 HASHTAGGED PUBLIC CONTENT ===\n", "model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnet18_swsl')\n", "# model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnet50_swsl')\n", "# model = 
torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnext50_32x4d_swsl')\n", @@ -45,6 +47,7 @@ }, { "cell_type": "markdown", + "id": "6d9094af", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -58,12 +61,13 @@ { "cell_type": "code", "execution_count": null, + "id": "076e700f", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -71,6 +75,7 @@ { "cell_type": "code", "execution_count": null, + "id": "e893da2a", "metadata": {}, "outputs": [], "source": [ @@ -94,7 +99,7 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. To get probabilities, you can run a softmax on it.\n", "print(torch.nn.functional.softmax(output[0], dim=0))\n" @@ -102,28 +107,29 @@ }, { "cell_type": "markdown", + "id": "79829b2e", "metadata": {}, "source": [ "### Model Description\n", - "This project includes the semi-supervised and semi-weakly supervised ImageNet models introduced in \"Billion-scale Semi-Supervised Learning for Image Classification\" . \n", + "This project includes the semi-supervised and semi-weakly supervised ImageNet models introduced in \"Billion-scale Semi-Supervised Learning for Image Classification\" .\n", "\n", - "\"Semi-supervised\" (SSL) ImageNet models are pre-trained on a subset of unlabeled YFCC100M public image dataset and fine-tuned with the ImageNet1K training dataset, as described by the semi-supervised training framework in the paper mentioned above. In this case, the high capacity teacher model was trained only with labeled examples. \n", + "\"Semi-supervised\" (SSL) ImageNet models are pre-trained on a subset of unlabeled YFCC100M public image dataset and fine-tuned with the ImageNet1K training dataset, as described by the semi-supervised training framework in the paper mentioned above. In this case, the high capacity teacher model was trained only with labeled examples.\n", "\n", - "\"Semi-weakly\" supervised (SWSL) ImageNet models are pre-trained on **940 million** public images with 1.5K hashtags matching with 1000 ImageNet1K synsets, followed by fine-tuning on ImageNet1K dataset. In this case, the associated hashtags are only used for building a better teacher model. During training the student model, those hashtags are ingored and the student model is pretrained with a subset of 64M images selected by the teacher model from the same 940 million public image dataset. \n", + "\"Semi-weakly\" supervised (SWSL) ImageNet models are pre-trained on **940 million** public images with 1.5K hashtags matching with 1000 ImageNet1K synsets, followed by fine-tuning on ImageNet1K dataset. In this case, the associated hashtags are only used for building a better teacher model. 
During training the student model, those hashtags are ingored and the student model is pretrained with a subset of 64M images selected by the teacher model from the same 940 million public image dataset.\n", "\n", - "Semi-weakly supervised ResNet and ResNext models provided in the table below significantly improve the top-1 accuracy on the ImageNet validation set compared to training from scratch or other training mechanisms introduced in the literature as of September 2019. For example, **We achieve state-of-the-art accuracy of 81.2% on ImageNet for the widely used/adopted ResNet-50 model architecture**. \n", + "Semi-weakly supervised ResNet and ResNext models provided in the table below significantly improve the top-1 accuracy on the ImageNet validation set compared to training from scratch or other training mechanisms introduced in the literature as of September 2019. For example, **We achieve state-of-the-art accuracy of 81.2% on ImageNet for the widely used/adopted ResNet-50 model architecture**.\n", "\n", "\n", "| Architecture | Supervision | #Parameters | FLOPS | Top-1 Acc. | Top-5 Acc. |\n", "| ------------------ | :--------------:|:----------: | :---: | :--------: | :--------: |\n", "| ResNet-18 | semi-supervised |14M | 2B | 72.8 | 91.5 |\n", - "| ResNet-50 | semi-supervised |25M | 4B | 79.3 | 94.9 | \n", + "| ResNet-50 | semi-supervised |25M | 4B | 79.3 | 94.9 |\n", "| ResNeXt-50 32x4d | semi-supervised |25M | 4B | 80.3 | 95.4 |\n", "| ResNeXt-101 32x4d | semi-supervised |42M | 8B | 81.0 | 95.7 |\n", "| ResNeXt-101 32x8d | semi-supervised |88M | 16B | 81.7 | 96.1 |\n", "| ResNeXt-101 32x16d | semi-supervised |193M | 36B | 81.9 | 96.2 |\n", "| ResNet-18 | semi-weakly supervised |14M | 2B | **73.4** | 91.9 |\n", - "| ResNet-50 | semi-weakly supervised |25M | 4B | **81.2** | 96.0 | \n", + "| ResNet-50 | semi-weakly supervised |25M | 4B | **81.2** | 96.0 |\n", "| ResNeXt-50 32x4d | semi-weakly supervised |25M | 4B | **82.2** | 96.3 |\n", "| ResNeXt-101 32x4d | semi-weakly supervised |42M | 8B | **83.4** | 96.8 |\n", "| ResNeXt-101 32x8d | semi-weakly supervised |88M | 16B | **84.3** | 97.2 |\n", @@ -138,6 +144,7 @@ { "cell_type": "code", "execution_count": null, + "id": "c8018412", "metadata": {}, "outputs": [], "source": [ @@ -154,5 +161,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/huggingface_pytorch-transformers.ipynb b/assets/hub/huggingface_pytorch-transformers.ipynb index c602f3239294..5dbf9f09ca2b 100644 --- a/assets/hub/huggingface_pytorch-transformers.ipynb +++ b/assets/hub/huggingface_pytorch-transformers.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "39bdec26", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -30,7 +31,7 @@ "5. **[XLNet](https://github.com/zihangdai/xlnet/)** (from Google/CMU) released with the paper [​XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.\n", "6. **[XLM](https://github.com/facebookresearch/XLM/)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.\n", "7. 
**[RoBERTa](https://github.com/pytorch/fairseq/tree/master/examples/roberta)** (from Facebook), released together with the paper a [Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.\n", - "8. **[DistilBERT](https://github.com/huggingface/pytorch-transformers/tree/master/examples/distillation)** (from HuggingFace), released together with the blogpost [Smaller, faster, cheaper, lighter: Introducing DistilBERT, a distilled version of BERT](https://medium.com/huggingface/distilbert-8cf3380435b) by Victor Sanh, Lysandre Debut and Thomas Wolf.\n", + "8. **[DistilBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation)** (from HuggingFace), released together with the blogpost [Smaller, faster, cheaper, lighter: Introducing DistilBERT, a distilled version of BERT](https://medium.com/huggingface/distilbert-8cf3380435b5) by Victor Sanh, Lysandre Debut and Thomas Wolf.\n", "\n", "The components available here are based on the `AutoModel` and `AutoTokenizer` classes of the `pytorch-transformers` library.\n", "\n", @@ -42,6 +43,7 @@ { "cell_type": "code", "execution_count": null, + "id": "5c53067d", "metadata": {}, "outputs": [], "source": [ @@ -51,6 +53,7 @@ }, { "cell_type": "markdown", + "id": "308262f7", "metadata": {}, "source": [ "# Usage\n", @@ -59,7 +62,7 @@ "- `config`: returns a configuration item corresponding to the specified model or pth.\n", "- `tokenizer`: returns a tokenizer corresponding to the specified model or path\n", "- `model`: returns a model corresponding to the specified model or path\n", - "- `modelWithLMHead`: returns a model with a language modeling head corresponding to the specified model or path\n", + "- `modelForCausalLM`: returns a model with a language modeling head corresponding to the specified model or path\n", "- `modelForSequenceClassification`: returns a model with a sequence classifier corresponding to the specified model or path\n", "- `modelForQuestionAnswering`: returns a model with a question answering head corresponding to the specified model or path\n", "\n", @@ -68,7 +71,7 @@ "\n", "\n", "\n", - "The available models are listed on the [pytorch-transformers documentation, pre-trained models section](https://huggingface.co/pytorch-transformers/pretrained_models.html).\n", + "The available models are listed on the [transformers documentation, models page](https://huggingface.co/models).\n", "\n", "# Documentation\n", "\n", @@ -77,12 +80,13 @@ "\n", "## Tokenizer\n", "\n", - "The tokenizer object allows the conversion from character strings to tokens understood by the different models. Each model has its own tokenizer, and some tokenizing methods are different across tokenizers. The complete documentation can be found [here](https://huggingface.co/pytorch-transformers/main_classes/tokenizer.html)." + "The tokenizer object allows the conversion from character strings to tokens understood by the different models. Each model has its own tokenizer, and some tokenizing methods are different across tokenizers. The complete documentation can be found [here](https://huggingface.co/docs/transformers/main_classes/tokenizer)." 
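As a quick illustration of what the tokenizer provides, the sketch below assumes a `bert-base-uncased` tokenizer loaded through the `tokenizer` entrypoint described above and only exercises the generic string → tokens → ids → string round trip; the comments show plausible outputs, not guaranteed ones:

```python
import torch

# Sketch: basic tokenizer round trip (bert-base-uncased assumed for illustration).
tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'tokenizer', 'bert-base-uncased')

tokens = tokenizer.tokenize("Who was Jim Henson?")       # e.g. ['who', 'was', 'jim', 'henson', '?']
token_ids = tokenizer.convert_tokens_to_ids(tokens)      # vocabulary indices for those tokens
print(tokens, token_ids, tokenizer.decode(token_ids))
```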
] }, { "cell_type": "code", "execution_count": null, + "id": "55d0ddd6", "metadata": { "attributes": { "classes": [ @@ -100,16 +104,18 @@ }, { "cell_type": "markdown", + "id": "9b76281a", "metadata": {}, "source": [ "## Models\n", "\n", - "The model object is a model instance inheriting from a `nn.Module`. Each model is accompanied by their saving/loading methods, either from a local file or directory, or from a pre-trained configuration (see previously described `config`). Each model works differently, a complete overview of the different models can be found in the [documentation](https://huggingface.co/pytorch-transformers/pretrained_models.html)." + "The model object is a model instance inheriting from a `nn.Module`. Each model is accompanied by their saving/loading methods, either from a local file or directory, or from a pre-trained configuration (see previously described `config`). Each model works differently, a complete overview of the different models can be found in the [documentation](https://huggingface.co/docs/transformers/main_classes/model)." ] }, { "cell_type": "code", "execution_count": null, + "id": "f0b040fa", "metadata": { "attributes": { "classes": [ @@ -132,6 +138,7 @@ }, { "cell_type": "markdown", + "id": "ff8a4aed", "metadata": {}, "source": [ "## Models with a language modeling head\n", @@ -142,6 +149,7 @@ { "cell_type": "code", "execution_count": null, + "id": "bb1f0833", "metadata": { "attributes": { "classes": [ @@ -153,17 +161,18 @@ "outputs": [], "source": [ "import torch\n", - "model = torch.hub.load('huggingface/pytorch-transformers', 'modelWithLMHead', 'bert-base-uncased') # Download model and configuration from S3 and cache.\n", - "model = torch.hub.load('huggingface/pytorch-transformers', 'modelWithLMHead', './test/bert_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`\n", - "model = torch.hub.load('huggingface/pytorch-transformers', 'modelWithLMHead', 'bert-base-uncased', output_attentions=True) # Update configuration during loading\n", + "model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'gpt2') # Download model and configuration from huggingface.co and cache.\n", + "model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './test/saved_model/') # E.g. 
model was saved using `save_pretrained('./test/saved_model/')`\n", + "model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'gpt2', output_attentions=True) # Update configuration during loading\n", "assert model.config.output_attentions == True\n", "# Loading from a TF checkpoint file instead of a PyTorch model (slower)\n", - "config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')\n", - "model = torch.hub.load('huggingface/pytorch-transformers', 'modelWithLMHead', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)" + "config = AutoConfig.from_pretrained('./tf_model/gpt_tf_model_config.json')\n", + "model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './tf_model/gpt_tf_checkpoint.ckpt.index', from_tf=True, config=config)" ] }, { "cell_type": "markdown", + "id": "6b609dea", "metadata": {}, "source": [ "## Models with a sequence classification head\n", @@ -174,6 +183,7 @@ { "cell_type": "code", "execution_count": null, + "id": "13fb07df", "metadata": { "attributes": { "classes": [ @@ -196,6 +206,7 @@ }, { "cell_type": "markdown", + "id": "ce5a6abf", "metadata": {}, "source": [ "## Models with a question answering head\n", @@ -206,6 +217,7 @@ { "cell_type": "code", "execution_count": null, + "id": "66de02b2", "metadata": { "attributes": { "classes": [ @@ -228,16 +240,18 @@ }, { "cell_type": "markdown", + "id": "dad5761e", "metadata": {}, "source": [ "## Configuration\n", "\n", - "The configuration is optional. The configuration object holds information concerning the model, such as the number of heads/layers, if the model should output attentions or hidden states, or if it should be adapted for TorchScript. Many parameters are available, some specific to each model. The complete documentation can be found [here](https://huggingface.co/pytorch-transformers/main_classes/configuration.html)." + "The configuration is optional. The configuration object holds information concerning the model, such as the number of heads/layers, if the model should output attentions or hidden states, or if it should be adapted for TorchScript. Many parameters are available, some specific to each model. The complete documentation can be found [here](https://huggingface.co/docs/transformers/main_classes/configuration)." 
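One common pattern, sketched below under the assumption of a `bert-base-uncased` checkpoint, is to load a configuration through the `config` entrypoint, flip a flag such as `output_attentions`, and hand the adjusted configuration back when loading the model; the cell that follows shows the documented variants of these calls:

```python
import torch

# Sketch: load a configuration, adjust it, and reuse it when loading the model
# (bert-base-uncased is assumed here purely for illustration).
config = torch.hub.load('huggingface/pytorch-transformers', 'config', 'bert-base-uncased')
config.output_attentions = True  # also return attention weights

model = torch.hub.load('huggingface/pytorch-transformers', 'model', 'bert-base-uncased', config=config)
assert model.config.output_attentions == True
```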
] }, { "cell_type": "code", "execution_count": null, + "id": "add1df70", "metadata": { "attributes": { "classes": [ @@ -268,6 +282,7 @@ }, { "cell_type": "markdown", + "id": "5fe5dae4", "metadata": {}, "source": [ "# Example Usage\n", @@ -280,6 +295,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f55c0546", "metadata": {}, "outputs": [], "source": [ @@ -295,6 +311,7 @@ }, { "cell_type": "markdown", + "id": "ef0883ad", "metadata": {}, "source": [ "## Using `BertModel` to encode the input sentence in a sequence of last layer hidden-states" @@ -303,6 +320,7 @@ { "cell_type": "code", "execution_count": null, + "id": "94d24133", "metadata": {}, "outputs": [], "source": [ @@ -321,14 +339,16 @@ }, { "cell_type": "markdown", + "id": "baa6cac7", "metadata": {}, "source": [ - "## Using `modelWithLMHead` to predict a masked token with BERT" + "## Using `modelForMaskedLM` to predict a masked token with BERT" ] }, { "cell_type": "code", "execution_count": null, + "id": "05547c6d", "metadata": {}, "outputs": [], "source": [ @@ -337,10 +357,10 @@ "indexed_tokens[masked_index] = tokenizer.mask_token_id\n", "tokens_tensor = torch.tensor([indexed_tokens])\n", "\n", - "masked_lm__model = torch.hub.load('huggingface/pytorch-transformers', 'modelWithLMHead', 'bert-base-cased')\n", + "masked_lm_model = torch.hub.load('huggingface/pytorch-transformers', 'modelForMaskedLM', 'bert-base-cased')\n", "\n", "with torch.no_grad():\n", - " predictions = masked_lm__model(tokens_tensor, token_type_ids=segments_tensors)\n", + " predictions = masked_lm_model(tokens_tensor, token_type_ids=segments_tensors)\n", "\n", "# Get the predicted token\n", "predicted_index = torch.argmax(predictions[0][0], dim=1)[masked_index].item()\n", @@ -350,6 +370,7 @@ }, { "cell_type": "markdown", + "id": "66b09e9d", "metadata": {}, "source": [ "## Using `modelForQuestionAnswering` to do question answering with BERT" @@ -358,6 +379,7 @@ { "cell_type": "code", "execution_count": null, + "id": "1412f4b2", "metadata": {}, "outputs": [], "source": [ @@ -374,10 +396,10 @@ "\n", "# Predict the start and end positions logits\n", "with torch.no_grad():\n", - " start_logits, end_logits = question_answering_model(tokens_tensor, token_type_ids=segments_tensors)\n", + " out = question_answering_model(tokens_tensor, token_type_ids=segments_tensors)\n", "\n", "# get the highest prediction\n", - "answer = question_answering_tokenizer.decode(indexed_tokens[torch.argmax(start_logits):torch.argmax(end_logits)+1])\n", + "answer = question_answering_tokenizer.decode(indexed_tokens[torch.argmax(out.start_logits):torch.argmax(out.end_logits)+1])\n", "assert answer == \"puppeteer\"\n", "\n", "# Or get the total loss which is the sum of the CrossEntropy loss for the start and end token positions (set model to train mode before if used for training)\n", @@ -387,6 +409,7 @@ }, { "cell_type": "markdown", + "id": "3cd69bf4", "metadata": {}, "source": [ "## Using `modelForSequenceClassification` to do paraphrase classification with BERT" @@ -395,6 +418,7 @@ { "cell_type": "code", "execution_count": null, + "id": "f3c3cb33", "metadata": {}, "outputs": [], "source": [ @@ -424,5 +448,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/hustvl_yolop.ipynb b/assets/hub/hustvl_yolop.ipynb new file mode 100644 index 000000000000..210d7fd6b7ef --- /dev/null +++ b/assets/hub/hustvl_yolop.ipynb @@ -0,0 +1,165 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "581f8ae6", + "metadata": {}, + "source": [ + 
"### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# YOLOP\n", + "\n", + "*Author: Hust Visual Learning Team*\n", + "\n", + "**YOLOP pretrained on the BDD100K dataset**\n", + "\n", + "## Before You Start\n", + "To install YOLOP dependencies:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9811e88b", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "pip install -qr https://github.com/hustvl/YOLOP/blob/main/requirements.txt # install dependencies" + ] + }, + { + "cell_type": "markdown", + "id": "b76d68d2", + "metadata": {}, + "source": [ + "## YOLOP: You Only Look Once for Panoptic driving Perception\n", + "\n", + "### Model Description\n", + "\n", + "\"YOLOP\n", + " \n", + "\n", + "- YOLOP is an efficient multi-task network that can jointly handle three crucial tasks in autonomous driving: object detection, drivable area segmentation and lane detection. And it is also the first to reach real-time on embedded devices while maintaining state-of-the-art level performance on the **BDD100K** dataset.\n", + "\n", + "\n", + "### Results\n", + "\n", + "#### Traffic Object Detection Result\n", + "\n", + "| Model | Recall(%) | mAP50(%) | Speed(fps) |\n", + "| -------------- | --------- | -------- | ---------- |\n", + "| `Multinet` | 81.3 | 60.2 | 8.6 |\n", + "| `DLT-Net` | 89.4 | 68.4 | 9.3 |\n", + "| `Faster R-CNN` | 77.2 | 55.6 | 5.3 |\n", + "| `YOLOv5s` | 86.8 | 77.2 | 82 |\n", + "| `YOLOP(ours)` | 89.2 | 76.5 | 41 |\n", + "\n", + "#### Drivable Area Segmentation Result\n", + "\n", + "| Model | mIOU(%) | Speed(fps) |\n", + "| ------------- | ------- | ---------- |\n", + "| `Multinet` | 71.6 | 8.6 |\n", + "| `DLT-Net` | 71.3 | 9.3 |\n", + "| `PSPNet` | 89.6 | 11.1 |\n", + "| `YOLOP(ours)` | 91.5 | 41 |\n", + "\n", + "#### Lane Detection Result\n", + "\n", + "| Model | mIOU(%) | IOU(%) |\n", + "| ------------- | ------- | ------ |\n", + "| `ENet` | 34.12 | 14.64 |\n", + "| `SCNN` | 35.79 | 15.84 |\n", + "| `ENet-SAD` | 36.56 | 16.02 |\n", + "| `YOLOP(ours)` | 70.50 | 26.20 |\n", + "\n", + "#### Ablation Studies 1: End-to-end v.s. Step-by-step\n", + "\n", + "| Training_method | Recall(%) | AP(%) | mIoU(%) | Accuracy(%) | IoU(%) |\n", + "| --------------- | --------- | ----- | ------- | ----------- | ------ |\n", + "| `ES-W` | 87.0 | 75.3 | 90.4 | 66.8 | 26.2 |\n", + "| `ED-W` | 87.3 | 76.0 | 91.6 | 71.2 | 26.1 |\n", + "| `ES-D-W` | 87.0 | 75.1 | 91.7 | 68.6 | 27.0 |\n", + "| `ED-S-W` | 87.5 | 76.1 | 91.6 | 68.0 | 26.8 |\n", + "| `End-to-end` | 89.2 | 76.5 | 91.5 | 70.5 | 26.2 |\n", + "\n", + "#### Ablation Studies 2: Multi-task v.s. Single task\n", + "\n", + "| Training_method | Recall(%) | AP(%) | mIoU(%) | Accuracy(%) | IoU(%) | Speed(ms/frame) |\n", + "| --------------- | --------- | ----- | ------- | ----------- | ------ | --------------- |\n", + "| `Det(only)` | 88.2 | 76.9 | - | - | - | 15.7 |\n", + "| `Da-Seg(only)` | - | - | 92.0 | - | - | 14.8 |\n", + "| `Ll-Seg(only)` | - | - | - | 79.6 | 27.9 | 14.8 |\n", + "| `Multitask` | 89.2 | 76.5 | 91.5 | 70.5 | 26.2 | 24.4 |\n", + "\n", + "**Notes**:\n", + "\n", + "- In table 4, E, D, S and W refer to Encoder, Detect head, two Segment heads and whole network. 
So the Algorithm (First, we only train Encoder and Detect head. Then we freeze the Encoder and Detect head as well as train two Segmentation heads. Finally, the entire network is trained jointly for all three tasks.) can be marked as ED-S-W, and the same for others.\n", + "\n", + "### Visualization\n", + "\n", + "#### Traffic Object Detection Result\n", + "\n", + "\"Traffic\n", + " \n", + "\n", + "#### Drivable Area Segmentation Result\n", + "\n", + "\"Drivable\n", + " \n", + "\n", + "#### Lane Detection Result\n", + "\n", + "\"Lane\n", + " \n", + "\n", + "**Notes**:\n", + "\n", + "- The visualization of lane detection result has been post processed by quadratic fitting.\n", + "\n", + "### Deployment\n", + "\n", + "Our model can reason in real-time on **Jetson Tx2**, with **Zed Camera** to capture image. We use **TensorRT** tool for speeding up. We provide code for deployment and reasoning of model in [github code](https://github.com/hustvl/YOLOP/tree/main/toolkits/deploy).\n", + "\n", + "\n", + "### Load From PyTorch Hub\n", + "This example loads the pretrained **YOLOP** model and passes an image for inference." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35131132", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "\n", + "# load model\n", + "model = torch.hub.load('hustvl/yolop', 'yolop', pretrained=True)\n", + "\n", + "#inference\n", + "img = torch.randn(1,3,640,640)\n", + "det_out, da_seg_out,ll_seg_out = model(img)" + ] + }, + { + "cell_type": "markdown", + "id": "b1f7cb62", + "metadata": {}, + "source": [ + "### Citation\n", + "\n", + "See for more detail in [github code](https://github.com/hustvl/YOLOP) and [arxiv paper](https://arxiv.org/abs/2108.11250).\n", + "\n", + "If you find our paper and code useful for your research, please consider giving a star and citation:" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/intelisl_midas_v2.ipynb b/assets/hub/intelisl_midas_v2.ipynb new file mode 100644 index 000000000000..d42c6ca06580 --- /dev/null +++ b/assets/hub/intelisl_midas_v2.ipynb @@ -0,0 +1,270 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "9773e1b5", + "metadata": {}, + "source": [ + "### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# MiDaS\n", + "\n", + "*Author: Intel ISL*\n", + "\n", + "**MiDaS models for computing relative depth from a single image.**\n", + "\n", + "\"alt\"\n", + "\n", + "\n", + "### Model Description\n", + "\n", + "[MiDaS](https://arxiv.org/abs/1907.01341) computes relative inverse depth from a single image. The repository provides multiple models that cover different use cases ranging from a small, high-speed model to a very large model that provide the highest accuracy. The models have been trained on 10 distinct datasets using\n", + "multi-objective optimization to ensure high quality on a wide range of inputs.\n", + "\n", + "### Dependencies\n", + "\n", + "MiDaS depends on [timm](https://github.com/rwightman/pytorch-image-models). 
Install with" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "61d54376", + "metadata": { + "attributes": { + "classes": [ + "shell" + ], + "id": "" + } + }, + "outputs": [], + "source": [ + "pip install timm" + ] + }, + { + "cell_type": "markdown", + "id": "62431618", + "metadata": {}, + "source": [ + "### Example Usage\n", + "\n", + "Download an image from the PyTorch homepage" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3eb1ad78", + "metadata": {}, + "outputs": [], + "source": [ + "import cv2\n", + "import torch\n", + "import urllib.request\n", + "\n", + "import matplotlib.pyplot as plt\n", + "\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", + "urllib.request.urlretrieve(url, filename)" + ] + }, + { + "cell_type": "markdown", + "id": "a5615c57", + "metadata": {}, + "source": [ + "Load a model (see [https://github.com/intel-isl/MiDaS/#Accuracy](https://github.com/intel-isl/MiDaS/#Accuracy) for an overview)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6670b011", + "metadata": {}, + "outputs": [], + "source": [ + "model_type = \"DPT_Large\" # MiDaS v3 - Large (highest accuracy, slowest inference speed)\n", + "#model_type = \"DPT_Hybrid\" # MiDaS v3 - Hybrid (medium accuracy, medium inference speed)\n", + "#model_type = \"MiDaS_small\" # MiDaS v2.1 - Small (lowest accuracy, highest inference speed)\n", + "\n", + "midas = torch.hub.load(\"intel-isl/MiDaS\", model_type)" + ] + }, + { + "cell_type": "markdown", + "id": "23f225c4", + "metadata": {}, + "source": [ + "Move model to GPU if available" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "812b485a", + "metadata": {}, + "outputs": [], + "source": [ + "device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n", + "midas.to(device)\n", + "midas.eval()" + ] + }, + { + "cell_type": "markdown", + "id": "16a3ab83", + "metadata": {}, + "source": [ + "Load transforms to resize and normalize the image for large or small model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f55089f9", + "metadata": {}, + "outputs": [], + "source": [ + "midas_transforms = torch.hub.load(\"intel-isl/MiDaS\", \"transforms\")\n", + "\n", + "if model_type == \"DPT_Large\" or model_type == \"DPT_Hybrid\":\n", + " transform = midas_transforms.dpt_transform\n", + "else:\n", + " transform = midas_transforms.small_transform" + ] + }, + { + "cell_type": "markdown", + "id": "f2df79b1", + "metadata": {}, + "source": [ + "Load image and apply transforms" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "db3feed4", + "metadata": {}, + "outputs": [], + "source": [ + "img = cv2.imread(filename)\n", + "img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n", + "\n", + "input_batch = transform(img).to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "4fecc717", + "metadata": {}, + "source": [ + "Predict and resize to original resolution" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "98ff0d23", + "metadata": {}, + "outputs": [], + "source": [ + "with torch.no_grad():\n", + " prediction = midas(input_batch)\n", + "\n", + " prediction = torch.nn.functional.interpolate(\n", + " prediction.unsqueeze(1),\n", + " size=img.shape[:2],\n", + " mode=\"bicubic\",\n", + " align_corners=False,\n", + " ).squeeze()\n", + "\n", + "output = prediction.cpu().numpy()" + ] + }, + { + "cell_type": "markdown", + "id": "5fbc168f", + "metadata": {}, + 
"source": [ + "Show result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bb63402b", + "metadata": {}, + "outputs": [], + "source": [ + "plt.imshow(output)\n", + "# plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "4d393d29", + "metadata": {}, + "source": [ + "### References\n", + "[Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer](https://arxiv.org/abs/1907.01341)\n", + "\n", + "[Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413)\n", + "\n", + "Please cite our papers if you use our models:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "34da4fb8", + "metadata": { + "attributes": { + "classes": [ + "bibtex" + ], + "id": "" + } + }, + "outputs": [], + "source": [ + "@article{Ranftl2020,\n", + "\tauthor = {Ren\\'{e} Ranftl and Katrin Lasinger and David Hafner and Konrad Schindler and Vladlen Koltun},\n", + "\ttitle = {Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer},\n", + "\tjournal = {IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)},\n", + "\tyear = {2020},\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea1d3aeb", + "metadata": { + "attributes": { + "classes": [ + "bibtex" + ], + "id": "" + } + }, + "outputs": [], + "source": [ + "@article{Ranftl2021,\n", + "\tauthor = {Ren\\'{e} Ranftl and Alexey Bochkovskiy and Vladlen Koltun},\n", + "\ttitle = {Vision Transformers for Dense Prediction},\n", + "\tjournal = {ArXiv preprint},\n", + "\tyear = {2021},\n", + "}" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/mateuszbuda_brain-segmentation-pytorch_unet.ipynb b/assets/hub/mateuszbuda_brain-segmentation-pytorch_unet.ipynb index ff3094d2cfb7..583641653b2c 100644 --- a/assets/hub/mateuszbuda_brain-segmentation-pytorch_unet.ipynb +++ b/assets/hub/mateuszbuda_brain-segmentation-pytorch_unet.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "00cb4ca5", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -21,6 +22,7 @@ { "cell_type": "code", "execution_count": null, + "id": "40a00854", "metadata": {}, "outputs": [], "source": [ @@ -31,6 +33,7 @@ }, { "cell_type": "markdown", + "id": "a69ab60c", "metadata": {}, "source": [ "Loads a U-Net model pre-trained for abnormality segmentation on a dataset of brain MRI volumes [kaggle.com/mateuszbuda/lgg-mri-segmentation](https://www.kaggle.com/mateuszbuda/lgg-mri-segmentation)\n", @@ -40,7 +43,7 @@ "\n", "This U-Net model comprises four levels of blocks containing two convolutional layers with batch normalization and ReLU activation function, and one max pooling layer in the encoding part and up-convolutional layers instead in the decoding part.\n", "The number of convolutional filters in each block is 32, 64, 128, and 256.\n", - "The buttleneck layer has 512 convolutional filters.\n", + "The bottleneck layer has 512 convolutional filters.\n", "From the encoding layers, skip connections are used to the corresponding layers in the decoding part.\n", "Input image is a 3-channel brain MRI slice from pre-contrast, FLAIR, and post-contrast sequences, respectively.\n", "Output is a one-channel probability map of abnormality regions with the same size as the input image.\n", @@ -54,6 +57,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a1f32ed5", "metadata": {}, "outputs": [], "source": [ @@ -67,6 +71,7 @@ 
{ "cell_type": "code", "execution_count": null, + "id": "373c3944", "metadata": {}, "outputs": [], "source": [ @@ -95,6 +100,7 @@ }, { "cell_type": "markdown", + "id": "8e5a28e4", "metadata": {}, "source": [ "### References\n", @@ -107,5 +113,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/nicolalandro_ntsnet-cub200_ntsnet.ipynb b/assets/hub/nicolalandro_ntsnet-cub200_ntsnet.ipynb new file mode 100644 index 000000000000..ab6e2386c415 --- /dev/null +++ b/assets/hub/nicolalandro_ntsnet-cub200_ntsnet.ipynb @@ -0,0 +1,118 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "876c6cc2", + "metadata": {}, + "source": [ + "### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# ntsnet\n", + "\n", + "*Author: Moreno Caraffini and Nicola Landro*\n", + "\n", + "**classify birds using this fine-grained image classifier**\n", + "\n", + "\"alt\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0c01c157", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "model = torch.hub.load('nicolalandro/ntsnet-cub200', 'ntsnet', pretrained=True,\n", + " **{'topN': 6, 'device':'cpu', 'num_classes': 200})" + ] + }, + { + "cell_type": "markdown", + "id": "315310b8", + "metadata": {}, + "source": [ + "### Example Usage" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "53f54463", + "metadata": {}, + "outputs": [], + "source": [ + "from torchvision import transforms\n", + "import torch\n", + "import urllib\n", + "from PIL import Image\n", + "\n", + "transform_test = transforms.Compose([\n", + " transforms.Resize((600, 600), Image.BILINEAR),\n", + " transforms.CenterCrop((448, 448)),\n", + " # transforms.RandomHorizontalFlip(), # only if train\n", + " transforms.ToTensor(),\n", + " transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n", + "])\n", + "\n", + "\n", + "model = torch.hub.load('nicolalandro/ntsnet-cub200', 'ntsnet', pretrained=True, **{'topN': 6, 'device':'cpu', 'num_classes': 200})\n", + "model.eval()\n", + "\n", + "url = 'https://raw.githubusercontent.com/nicolalandro/ntsnet-cub200/master/images/nts-net.png'\n", + "img = Image.open(urllib.request.urlopen(url))\n", + "scaled_img = transform_test(img)\n", + "torch_images = scaled_img.unsqueeze(0)\n", + "\n", + "with torch.no_grad():\n", + " top_n_coordinates, concat_out, raw_logits, concat_logits, part_logits, top_n_index, top_n_prob = model(torch_images)\n", + "\n", + " _, predict = torch.max(concat_logits, 1)\n", + " pred_id = predict.item()\n", + " print('bird class:', model.bird_classes[pred_id])" + ] + }, + { + "cell_type": "markdown", + "id": "5c543503", + "metadata": {}, + "source": [ + "### Model Description\n", + "This is an nts-net pretrained with CUB200 2011 dataset, which is a fine grained dataset of birds species.\n", + "\n", + "### References\n", + "You can read the full paper at this [link](http://artelab.dista.uninsubria.it/res/research/papers/2019/2019-IVCNZ-Nawaz-Birds.pdf)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c4165e38", + "metadata": { + "attributes": { + "classes": [ + "bibtex" + ], + "id": "" + } + }, + "outputs": [], + "source": [ + "@INPROCEEDINGS{Gallo:2019:IVCNZ,\n", + " author={Nawaz, Shah and Calefati, Alessandro and Caraffini, Moreno and Landro, Nicola and Gallo, Ignazio},\n", + " booktitle={2019 International Conference on Image and Vision Computing New Zealand (IVCNZ 2019)},\n", + " title={Are These Birds Similar: Learning Branched Networks for Fine-grained Representations},\n", + " year={2019},\n", + " month={Dec},\n", + "}" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/nvidia_deeplearningexamples_efficientnet.ipynb b/assets/hub/nvidia_deeplearningexamples_efficientnet.ipynb new file mode 100644 index 000000000000..4cbf7f8872c3 --- /dev/null +++ b/assets/hub/nvidia_deeplearningexamples_efficientnet.ipynb @@ -0,0 +1,204 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ceae5ab0", + "metadata": {}, + "source": [ + "### This notebook requires a GPU runtime to run.\n", + "### Please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# EfficientNet\n", + "\n", + "*Author: NVIDIA*\n", + "\n", + "**EfficientNets are a family of image classification models, which achieve state-of-the-art accuracy, being an order-of-magnitude smaller and faster. Trained with mixed precision using Tensor Cores.**\n", + "\n", + "\"alt\"\n", + "\n", + "\n", + "\n", + "### Model Description\n", + "\n", + "EfficientNet is an image classification model family. It was first described in [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946). This notebook allows you to load and test the EfficientNet-B0, EfficientNet-B4, EfficientNet-WideSE-B0 and, EfficientNet-WideSE-B4 models.\n", + "\n", + "EfficientNet-WideSE models use Squeeze-and-Excitation layers wider than original EfficientNet models, the width of SE module is proportional to the width of Depthwise Separable Convolutions instead of block width.\n", + "\n", + "WideSE models are slightly more accurate than original models.\n", + "\n", + "This model is trained with mixed precision using Tensor Cores on Volta and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results over 2x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.\n", + "\n", + "We use [NHWC data layout](https://pytorch.org/tutorials/intermediate/memory_format_tutorial.html) when training using Mixed Precision.\n", + "\n", + "### Example\n", + "\n", + "In the example below we will use the pretrained ***EfficientNet*** model to perform inference on image and present the result.\n", + "\n", + "To run the example you need some extra python packages installed. These are needed for preprocessing images and visualization." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "619cf1e4", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install validators matplotlib" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6d96c94", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from PIL import Image\n", + "import torchvision.transforms as transforms\n", + "import numpy as np\n", + "import json\n", + "import requests\n", + "import matplotlib.pyplot as plt\n", + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "%matplotlib inline\n", + "\n", + "device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n", + "print(f'Using {device} for inference')" + ] + }, + { + "cell_type": "markdown", + "id": "36588892", + "metadata": {}, + "source": [ + "Load the model pretrained on ImageNet dataset.\n", + "\n", + "You can choose among the following models:\n", + "\n", + "| TorchHub entrypoint | Description |\n", + "| :----- | :----- |\n", + "| `nvidia_efficientnet_b0` | baseline EfficientNet |\n", + "| `nvidia_efficientnet_b4` | scaled EfficientNet|\n", + "| `nvidia_efficientnet_widese_b0` | model with Squeeze-and-Excitation layers wider than baseline EfficientNet model |\n", + "| `nvidia_efficientnet_widese_b4` | model with Squeeze-and-Excitation layers wider than scaled EfficientNet model |\n", + "\n", + "There are also quantized version of the models, but they require nvidia container. See [quantized models](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/efficientnet#quantization)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f154f3ba", + "metadata": {}, + "outputs": [], + "source": [ + "efficientnet = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_efficientnet_b0', pretrained=True)\n", + "utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_convnets_processing_utils')\n", + "\n", + "efficientnet.eval().to(device)\n" + ] + }, + { + "cell_type": "markdown", + "id": "99733b78", + "metadata": {}, + "source": [ + "Prepare sample input data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b8e50db6", + "metadata": {}, + "outputs": [], + "source": [ + "uris = [\n", + " 'http://images.cocodataset.org/test-stuff2017/000000024309.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000028117.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000006149.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000004954.jpg',\n", + "]\n", + "\n", + "batch = torch.cat(\n", + " [utils.prepare_input_from_uri(uri) for uri in uris]\n", + ").to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "9ac2d970", + "metadata": {}, + "source": [ + "Run inference. Use `pick_n_best(predictions=output, n=topN)` helper function to pick N most probable hypotheses according to the model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e5707fc8", + "metadata": {}, + "outputs": [], + "source": [ + "with torch.no_grad():\n", + " output = torch.nn.functional.softmax(efficientnet(batch), dim=1)\n", + " \n", + "results = utils.pick_n_best(predictions=output, n=5)" + ] + }, + { + "cell_type": "markdown", + "id": "7a073912", + "metadata": {}, + "source": [ + "Display the result." 
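+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ad0c1255",
+   "metadata": {},
+   "source": [
+    "The next cell shows each image together with the picked hypotheses. As an optional aside (not part of the original notebook), the raw probabilities can also be inspected directly with plain `torch.topk`; here `output` is the softmax tensor computed in the inference cell above."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ad1c1255",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional sketch: top-5 class indices and probabilities straight from the softmax output.\n",
+    "# `output` comes from the inference cell above; mapping indices to names needs an ImageNet label map.\n",
+    "top_probs, top_classes = torch.topk(output, k=5, dim=1)\n",
+    "for i in range(top_probs.size(0)):\n",
+    "    print(f'image {i}:')\n",
+    "    for p, c in zip(top_probs[i].tolist(), top_classes[i].tolist()):\n",
+    "        print(f'  class {c}: {p:.4f}')"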
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9673244c", + "metadata": {}, + "outputs": [], + "source": [ + "for uri, result in zip(uris, results):\n", + " img = Image.open(requests.get(uri, stream=True).raw)\n", + " img.thumbnail((256,256), Image.ANTIALIAS)\n", + " plt.imshow(img)\n", + " plt.show()\n", + " print(result)" + ] + }, + { + "cell_type": "markdown", + "id": "45892070", + "metadata": {}, + "source": [ + "### Details\n", + "For detailed information on model input and output, training recipies, inference and performance visit:\n", + "[github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/efficientnet)\n", + "and/or [NGC](https://ngc.nvidia.com/catalog/resources/nvidia:efficientnet_for_pytorch)\n", + "\n", + "### References\n", + "\n", + " - [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)\n", + " - [model on NGC](https://ngc.nvidia.com/catalog/resources/nvidia:efficientnet_for_pytorch)\n", + " - [model on github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/efficientnet)\n", + " - [pretrained model on NGC (efficientnet-b0)](https://ngc.nvidia.com/catalog/models/nvidia:efficientnet_b0_pyt_amp)\n", + " - [pretrained model on NGC (efficientnet-b4)](https://ngc.nvidia.com/catalog/models/nvidia:efficientnet_b4_pyt_amp)\n", + " - [pretrained model on NGC (efficientnet-widese-b0)](https://ngc.nvidia.com/catalog/models/nvidia:efficientnet_widese_b0_pyt_amp)\n", + " - [pretrained model on NGC (efficientnet-widese-b4)](https://ngc.nvidia.com/catalog/models/nvidia:efficientnet_widese_b4_pyt_amp)\n", + " - [pretrained, quantized model on NGC (efficientnet-widese-b0)](https://ngc.nvidia.com/catalog/models/nvidia:efficientnet_widese_b0_pyt_amp)\n", + " - [pretrained, quantized model on NGC (efficientnet-widese-b4)](https://ngc.nvidia.com/catalog/models/nvidia:efficientnet_widese_b4_pyt_amp)" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/nvidia_deeplearningexamples_fastpitch.ipynb b/assets/hub/nvidia_deeplearningexamples_fastpitch.ipynb new file mode 100644 index 000000000000..ee5b0a18e335 --- /dev/null +++ b/assets/hub/nvidia_deeplearningexamples_fastpitch.ipynb @@ -0,0 +1,324 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "fbf710de", + "metadata": {}, + "source": [ + "### This notebook requires a GPU runtime to run.\n", + "### Please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# FastPitch 2\n", + "\n", + "*Author: NVIDIA*\n", + "\n", + "**The FastPitch model for generating mel spectrograms from text**\n", + "\n", + "\"alt\"\n", + "\n", + "\n", + "\n", + "### Model Description\n", + "\n", + "This notebook demonstrates a PyTorch implementation of the FastPitch model described in the [FastPitch](https://arxiv.org/abs/2006.06873) paper.\n", + "The FastPitch model generates mel-spectrograms and predicts a pitch contour from raw input text. In version 1.1, it does not need any pre-trained aligning model to bootstrap from. To get the audio waveform we need a second model that will produce it from the generated mel-spectrogram. 
In this notebook we use HiFi-GAN model for that second step.\n", + "\n", + "The FastPitch model is based on the [FastSpeech](https://arxiv.org/abs/1905.09263) model. The main differences between FastPitch vs FastSpeech are as follows:\n", + "* no dependence on external aligner (Transformer TTS, Tacotron 2); in version 1.1, FastPitch aligns audio to transcriptions by itself as in [One TTS Alignment To Rule Them All](https://arxiv.org/abs/2108.10447),\n", + "* FastPitch explicitly learns to predict the pitch contour,\n", + "* pitch conditioning removes harsh sounding artifacts and provides faster convergence,\n", + "* no need for distilling mel-spectrograms with a teacher model,\n", + "* capabilities to train a multi-speaker model.\n", + "\n", + "\n", + "#### Model architecture\n", + "\n", + "![FastPitch Architecture](https://raw.githubusercontent.com/NVIDIA/DeepLearningExamples/master/PyTorch/SpeechSynthesis/FastPitch/img/fastpitch_model.png)\n", + "\n", + "### Example\n", + "In the example below:\n", + "\n", + "- pretrained FastPitch and HiFiGAN models are loaded from torch.hub\n", + "- given tensor representation of an input text (\"Say this smoothly to prove you are not a robot.\"), FastPitch generates mel spectrogram\n", + "- HiFiGAN generates sound given the mel spectrogram\n", + "- the output sound is saved in an 'audio.wav' file\n", + "\n", + "To run the example you need some extra python packages installed. These are needed for preprocessing of text and audio, as well as for display and input/output handling. Finally, for better performance of FastPitch model, we download the CMU pronounciation dictionary." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a76cae65", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "apt-get update\n", + "apt-get install -y libsndfile1 wget\n", + "pip install numpy scipy librosa unidecode inflect librosa matplotlib==3.6.3\n", + "wget https://raw.githubusercontent.com/NVIDIA/NeMo/263a30be71e859cee330e5925332009da3e5efbc/scripts/tts_dataset_files/heteronyms-052722 -qO heteronyms\n", + "wget https://raw.githubusercontent.com/NVIDIA/NeMo/263a30be71e859cee330e5925332009da3e5efbc/scripts/tts_dataset_files/cmudict-0.7b_nv22.08 -qO cmudict-0.7b" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ddaea31f", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import matplotlib.pyplot as plt\n", + "from IPython.display import Audio\n", + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "\n", + "device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n", + "print(f'Using {device} for inference')" + ] + }, + { + "cell_type": "markdown", + "id": "e76ec37b", + "metadata": {}, + "source": [ + "Download and setup FastPitch generator model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "661b4e69", + "metadata": {}, + "outputs": [], + "source": [ + "fastpitch, generator_train_setup = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_fastpitch')" + ] + }, + { + "cell_type": "markdown", + "id": "806dcb3e", + "metadata": {}, + "source": [ + "Download and setup vocoder and denoiser models." 
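+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ad0c1257",
+   "metadata": {},
+   "source": [
+    "Before fetching the vocoder, an optional aside (not part of the original notebook): peek at the training setup dictionary returned alongside the generator above. The keys printed here are the same ones compared against the vocoder setup a few cells below; `.get()` is used so a missing key simply prints as None."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ad1c1257",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional sketch: inspect the generator training setup returned by torch.hub.load above.\n",
+    "for key in ('sampling_rate', 'hop_length', 'win_length', 'p_arpabet'):\n",
+    "    print(key, '=', generator_train_setup.get(key))"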
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "703a99e7", + "metadata": {}, + "outputs": [], + "source": [ + "hifigan, vocoder_train_setup, denoiser = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_hifigan')" + ] + }, + { + "cell_type": "markdown", + "id": "49b8476d", + "metadata": {}, + "source": [ + "Verify that generator and vocoder models agree on input parameters." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a2465e6d", + "metadata": {}, + "outputs": [], + "source": [ + "CHECKPOINT_SPECIFIC_ARGS = [\n", + " 'sampling_rate', 'hop_length', 'win_length', 'p_arpabet', 'text_cleaners',\n", + " 'symbol_set', 'max_wav_value', 'prepend_space_to_text',\n", + " 'append_space_to_text']\n", + "\n", + "for k in CHECKPOINT_SPECIFIC_ARGS:\n", + "\n", + " v1 = generator_train_setup.get(k, None)\n", + " v2 = vocoder_train_setup.get(k, None)\n", + "\n", + " assert v1 is None or v2 is None or v1 == v2, \\\n", + " f'{k} mismatch in spectrogram generator and vocoder'" + ] + }, + { + "cell_type": "markdown", + "id": "a32481e1", + "metadata": {}, + "source": [ + "Put all models on available device." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fb26a339", + "metadata": {}, + "outputs": [], + "source": [ + "fastpitch.to(device)\n", + "hifigan.to(device)\n", + "denoiser.to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "c8c30939", + "metadata": {}, + "source": [ + "Load text processor." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21f7495e", + "metadata": {}, + "outputs": [], + "source": [ + "tp = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_textprocessing_utils', cmudict_path=\"cmudict-0.7b\", heteronyms_path=\"heteronyms\")" + ] + }, + { + "cell_type": "markdown", + "id": "8ec32629", + "metadata": {}, + "source": [ + "Set the text to be synthetized, prepare input and set additional generation parameters." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "55b0f11e", + "metadata": {}, + "outputs": [], + "source": [ + "text = \"Say this smoothly, to prove you are not a robot.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c85f7f1e", + "metadata": {}, + "outputs": [], + "source": [ + "batches = tp.prepare_input_sequence([text], batch_size=1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33fc7828", + "metadata": {}, + "outputs": [], + "source": [ + "gen_kw = {'pace': 1.0,\n", + " 'speaker': 0,\n", + " 'pitch_tgt': None,\n", + " 'pitch_transform': None}\n", + "denoising_strength = 0.005" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "468c9cac", + "metadata": {}, + "outputs": [], + "source": [ + "for batch in batches:\n", + " with torch.no_grad():\n", + " mel, mel_lens, *_ = fastpitch(batch['text'].to(device), **gen_kw)\n", + " audios = hifigan(mel).float()\n", + " audios = denoiser(audios.squeeze(1), denoising_strength)\n", + " audios = audios.squeeze(1) * vocoder_train_setup['max_wav_value']\n" + ] + }, + { + "cell_type": "markdown", + "id": "5a26d244", + "metadata": {}, + "source": [ + "Plot the intermediate spectorgram." 
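+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ad0c1258",
+   "metadata": {},
+   "source": [
+    "Before plotting, a quick optional sanity check (not part of the original notebook): report how much audio the synthesis loop above produced. It only assumes the `audios` tensor and the `sampling_rate` entry of `vocoder_train_setup`, which the notebook also uses further below when writing the WAV file."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ad1c1258",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional sketch: length of the generated audio in samples and seconds.\n",
+    "# `audios` and `vocoder_train_setup` come from the cells above.\n",
+    "num_samples = audios.size(-1)\n",
+    "rate = vocoder_train_setup['sampling_rate']\n",
+    "print(f'{num_samples} samples ~ {num_samples / rate:.2f} seconds of audio')"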
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e5ca266c", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(10,12))\n", + "res_mel = mel[0].detach().cpu().numpy()\n", + "plt.imshow(res_mel, origin='lower')\n", + "plt.xlabel('time')\n", + "plt.ylabel('frequency')\n", + "_=plt.title('Spectrogram')" + ] + }, + { + "cell_type": "markdown", + "id": "51bfa607", + "metadata": {}, + "source": [ + "Synthesize audio." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2498134", + "metadata": {}, + "outputs": [], + "source": [ + "audio_numpy = audios[0].cpu().numpy()\n", + "Audio(audio_numpy, rate=22050)" + ] + }, + { + "cell_type": "markdown", + "id": "6e13c5bd", + "metadata": {}, + "source": [ + "Write audio to wav file." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0db32b10", + "metadata": {}, + "outputs": [], + "source": [ + "from scipy.io.wavfile import write\n", + "write(\"audio.wav\", vocoder_train_setup['sampling_rate'], audio_numpy)" + ] + }, + { + "cell_type": "markdown", + "id": "69fe3ee6", + "metadata": {}, + "source": [ + "### Details\n", + "For detailed information on model input and output, training recipes, inference and performance visit: [github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/HiFiGAN) and/or [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/resources/fastpitch_pyt)\n", + "\n", + "### References\n", + "\n", + " - [FastPitch paper](https://arxiv.org/abs/2006.06873)\n", + " - [FastPitch on NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/resources/fastpitch_pyt)\n", + " - [HiFi-GAN on NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/resources/hifigan_pyt)\n", + " - [FastPitch and HiFi-GAN on github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/HiFiGAN)" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/nvidia_deeplearningexamples_gpunet.ipynb b/assets/hub/nvidia_deeplearningexamples_gpunet.ipynb new file mode 100644 index 000000000000..79264af4906d --- /dev/null +++ b/assets/hub/nvidia_deeplearningexamples_gpunet.ipynb @@ -0,0 +1,218 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ecb3dc5e", + "metadata": {}, + "source": [ + "### This notebook requires a GPU runtime to run.\n", + "### Please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# GPUNet\n", + "\n", + "*Author: NVIDIA*\n", + "\n", + "**GPUNet is a new family of Convolutional Neural Networks designed to max out the performance of NVIDIA GPU and TensorRT.**\n", + "\n", + "\"alt\"\n", + "\n", + "\n", + "\n", + "### Model Description\n", + "GPUNets are a new family of deployment and production ready Convolutional Neural Networks from NVIDIA auto-designed to max out the performance of NVIDIA GPU and TensorRT. \n", + "\n", + "Crafted by NVIDIA AI using novel Neural Architecture Search (NAS) methods, GPUNet demonstrates state-of-the-art inference performance up to 2x faster than EfficientNet-X and FBNet-V3. This notebook allows you to load and test all the GPUNet model implementations listed in our [CVPR-2022 paper](https://arxiv.org/pdf/2205.00841.pdf).
You can use this notebook to quickly load each one of listed models to perform inference runs.\n", + "\n", + "### Example\n", + "In the example below the pretrained ***GPUNet-0*** model is loaded by default to perform inference on image and present the result. You can switch the default pre-trained model loading from GPUNet-0 to one of these: GPUNet-1, GPUNet-2, GPUNet-P0, GPUNet-P1, GPUNet-D1 or GPUNet-D2.\n", + "### Install pre-requisites\n", + "To run the example you need some extra python packages installed. These are needed for preprocessing images and visualization." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "af1a6cd3", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install validators matplotlib\n", + "!pip install timm==0.5.4" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "478c16f7", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from PIL import Image\n", + "import torchvision.transforms as transforms\n", + "import numpy as np\n", + "import json\n", + "import requests\n", + "import matplotlib.pyplot as plt\n", + "import warnings\n", + "\n", + "warnings.filterwarnings('ignore')\n", + "%matplotlib inline\n", + "\n", + "\n", + "if torch.cuda.is_available():\n", + " device = torch.device(\"cuda\") \n", + " !nvidia-smi\n", + "else:\n", + " device = torch.device(\"cpu\")\n", + "\n", + "print(f'Using {device} for inference')" + ] + }, + { + "cell_type": "markdown", + "id": "b086b6be", + "metadata": {}, + "source": [ + "### Load Pretrained model\n", + "Loads NVIDIA GPUNet-0 model by default pre-trained on ImageNet dataset. You can switch the default pre-trained model loading from GPUNet-0 to one of the following models listed below. \n", + "\n", + "The model architecture is visible as output of the loaded model. For details architecture and latency info please refer to [architecture section](https://github.com/NVIDIA/DeepLearningExamples/tree/torchhub/PyTorch/Classification/GPUNet#model-architecture) in the original repo and Table#[3](https://arxiv.org/pdf/2205.00841.pdf) in the CVPR-2022 paper, respectively. 
\n", + "\n", + "Please pick and choose one of the following pre-trained models:\n", + "\n", + "| TorchHub model | Description |\n", + "| :----- | :----- |\n", + "| `GPUNet-0` | GPUNet-0 has the fastest measured latency on GV100 |\n", + "| `GPUNet-1` | GPUNet-1 has improved accuracy with one additional layer on GPUNet-0|\n", + "| `GPUNet-2` | GPUNet-2 has higher accuracy with two additional layers on GPUNet-0 |\n", + "| `GPUNet-P0` | GPUNet-P0 is the distilled model with higher accuracy than GPUNet-0 but similar latency|\n", + "| `GPUNet-P1` | GPUNet-P1 is distilled model with even higher accuracy than GPUNet-1 but similar latency |\n", + "| `GPUNet-D1` | GPUNet-D1 has the second highest accuracy amongst all GPUNets|\n", + "| `GPUNet-D2` | GPUNet-D2 has the highest accuracy amongst all GPUNets |" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4ce9da6c", + "metadata": {}, + "outputs": [], + "source": [ + "model_type = \"GPUNet-0\" # select one from above\n", + "precision = \"fp32\" # select either fp32 of fp16 (for better performance on GPU)\n", + "\n", + "gpunet = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_gpunet', pretrained=True, model_type=model_type, model_math=precision)\n", + "utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_convnets_processing_utils')\n", + "\n", + "gpunet.to(device)\n", + "gpunet.eval()" + ] + }, + { + "cell_type": "markdown", + "id": "99c602eb", + "metadata": {}, + "source": [ + "### Prepare inference data\n", + "Prepare sample input data for inference." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c25f4891", + "metadata": {}, + "outputs": [], + "source": [ + "uris = [\n", + " 'http://images.cocodataset.org/test-stuff2017/000000024309.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000028117.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000006149.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000004954.jpg',\n", + "]\n", + "\n", + "batch = torch.cat(\n", + " [utils.prepare_input_from_uri(uri) for uri in uris]\n", + ").to(device)\n", + "\n", + "if precision == \"fp16\":\n", + " batch = batch.half()\n", + " \n", + "print(\"Ready to run inference...\")" + ] + }, + { + "cell_type": "markdown", + "id": "9249baab", + "metadata": {}, + "source": [ + "### Run inference\n", + "Use `pick_n_best(predictions=output, n=topN)` helper function to pick N most probable hypotheses according to the model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b2e1d924", + "metadata": {}, + "outputs": [], + "source": [ + "with torch.no_grad():\n", + " output = torch.nn.functional.softmax(gpunet(batch), dim=1)\n", + " \n", + "results = utils.pick_n_best(predictions=output, n=5)" + ] + }, + { + "cell_type": "markdown", + "id": "bbed08b2", + "metadata": {}, + "source": [ + "### Display result" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c858ea08", + "metadata": {}, + "outputs": [], + "source": [ + "for uri, result in zip(uris, results):\n", + " img = Image.open(requests.get(uri, stream=True).raw)\n", + " img.thumbnail((256,256), Image.ANTIALIAS)\n", + " plt.imshow(img)\n", + " plt.show()\n", + " print(result)" + ] + }, + { + "cell_type": "markdown", + "id": "f5868b1d", + "metadata": {}, + "source": [ + "### Details\n", + "For detailed information on model input and output, training recipies, inference and performance visit:\n", + "[github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/GPUNet)\n", + "\n", + "### References\n", + "\n", + " - [GPUNets: Searching Deployable Convolution Neural Networks for GPUs](https://arxiv.org/pdf/2205.00841.pdf)\n", + " - [model on github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/GPUNet)\n", + " - [pretrained model on NGC (GPUNet-0)](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/gpunet_0_pyt_ckpt)\n", + " - [pretrained model on NGC (GPUNet-1)](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/gpunet_1_pyt_ckpt)\n", + " - [pretrained model on NGC (GPUNet-2)](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/gpunet_2_pyt_ckpt)\n", + " - [pretrained distilled model on NGC (GPUNet-P0)](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/gpunet_p0_pyt_ckpt)\n", + " - [pretrained, distilled model on NGC (GPUNet-P1)](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/gpunet_p1_pyt_ckpt)\n", + " - [pretrained, distilled model on NGC (GPUNet-D1)](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/gpunet_d1_pyt_ckpt)\n", + " - [pretrained, distilled model on NGC (GPUNet-D2)](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/gpunet_d2_pyt_ckpt)" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/nvidia_deeplearningexamples_hifigan.ipynb b/assets/hub/nvidia_deeplearningexamples_hifigan.ipynb new file mode 100644 index 000000000000..bafd49743292 --- /dev/null +++ b/assets/hub/nvidia_deeplearningexamples_hifigan.ipynb @@ -0,0 +1,318 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "f5c2575d", + "metadata": {}, + "source": [ + "### This notebook requires a GPU runtime to run.\n", + "### Please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# HiFi GAN\n", + "\n", + "*Author: NVIDIA*\n", + "\n", + "**The HiFi GAN model for generating waveforms from mel spectrograms**\n", + "\n", + "\"alt\"\n", + "\n", + "\n", + "\n", + "### Model Description\n", + "This notebook demonstrates a PyTorch implementation of the HiFi-GAN model described in the paper: [HiFi-GAN: Generative Adversarial Networks for Efficient and High Fidelity Speech Synthesis](https://arxiv.org/abs/2010.05646).\n", + "The HiFi-GAN model implements a spectrogram inversion model that allows to synthesize 
speech waveforms from mel-spectrograms. It follows the generative adversarial network (GAN) paradigm, and is composed of a generator and a discriminator. After training, the generator is used for synthesis, and the discriminator is discarded.\n", + "\n", + "Our implementation is based on the one [published by the authors of the paper](https://github.com/jik876/hifi-gan). We modify the original hyperparameters and provide an alternative training recipe, which enables training on larger batches and faster convergence. HiFi-GAN is trained on a publicly available [LJ Speech dataset](https://keithito.com/LJ-Speech-Dataset/). The samples demonstrate speech synthesized with our publicly available FastPitch and HiFi-GAN checkpoints.\n", + "\n", + "#### Model architecture\n", + "\n", + "![HiFiGAN Architecture](https://raw.githubusercontent.com/NVIDIA/DeepLearningExamples/master/PyTorch/SpeechSynthesis/HiFiGAN/img/hifigan_model.png)\n", + "\n", + "### Example\n", + "In the example below:\n", + "\n", + "- pretrained FastPitch and HiFiGAN models are loaded from torch.hub\n", + "- given tensor representation of an input text (\"Say this smoothly to prove you are not a robot.\"), FastPitch generates mel spectrogram \n", + "- HiFiGAN generates sound given the mel spectrogram\n", + "- the output sound is saved in an 'audio.wav' file\n", + "\n", + "To run the example you need some extra python packages installed. These are needed for preprocessing of text and audio, as well as for display and input/output handling. Finally, for better performance of FastPitch model, we download the CMU pronounciation dictionary." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "96d7b856", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "pip install numpy scipy librosa unidecode inflect librosa matplotlib==3.6.3\n", + "apt-get update\n", + "apt-get install -y libsndfile1 wget\n", + "wget https://raw.githubusercontent.com/NVIDIA/NeMo/263a30be71e859cee330e5925332009da3e5efbc/scripts/tts_dataset_files/heteronyms-052722 -qO heteronyms\n", + "wget https://raw.githubusercontent.com/NVIDIA/NeMo/263a30be71e859cee330e5925332009da3e5efbc/scripts/tts_dataset_files/cmudict-0.7b_nv22.08 -qO cmudict-0.7b" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e5499ef3", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import matplotlib.pyplot as plt\n", + "from IPython.display import Audio\n", + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "\n", + "device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n", + "print(f'Using {device} for inference')" + ] + }, + { + "cell_type": "markdown", + "id": "a69a7f5a", + "metadata": {}, + "source": [ + "Download and setup FastPitch generator model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0835f363", + "metadata": {}, + "outputs": [], + "source": [ + "fastpitch, generator_train_setup = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_fastpitch')" + ] + }, + { + "cell_type": "markdown", + "id": "2847bbb7", + "metadata": {}, + "source": [ + "Download and setup vocoder and denoiser models." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "40c89df3", + "metadata": {}, + "outputs": [], + "source": [ + "hifigan, vocoder_train_setup, denoiser = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_hifigan')" + ] + }, + { + "cell_type": "markdown", + "id": "594dce75", + "metadata": {}, + "source": [ + "Verify that generator and vocoder models agree on input parameters." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ec24d8eb", + "metadata": {}, + "outputs": [], + "source": [ + "CHECKPOINT_SPECIFIC_ARGS = [\n", + " 'sampling_rate', 'hop_length', 'win_length', 'p_arpabet', 'text_cleaners',\n", + " 'symbol_set', 'max_wav_value', 'prepend_space_to_text',\n", + " 'append_space_to_text']\n", + "\n", + "for k in CHECKPOINT_SPECIFIC_ARGS:\n", + "\n", + " v1 = generator_train_setup.get(k, None)\n", + " v2 = vocoder_train_setup.get(k, None)\n", + "\n", + " assert v1 is None or v2 is None or v1 == v2, \\\n", + " f'{k} mismatch in spectrogram generator and vocoder'" + ] + }, + { + "cell_type": "markdown", + "id": "f98e1cd9", + "metadata": {}, + "source": [ + "Put all models on available device." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "148a7bfb", + "metadata": {}, + "outputs": [], + "source": [ + "fastpitch.to(device)\n", + "hifigan.to(device)\n", + "denoiser.to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "e6793a1f", + "metadata": {}, + "source": [ + "Load text processor." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7cdc435b", + "metadata": {}, + "outputs": [], + "source": [ + "tp = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_textprocessing_utils', cmudict_path=\"cmudict-0.7b\", heteronyms_path=\"heteronyms\")" + ] + }, + { + "cell_type": "markdown", + "id": "ef5e590f", + "metadata": {}, + "source": [ + "Set the text to be synthetized, prepare input and set additional generation parameters." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e9547b32", + "metadata": {}, + "outputs": [], + "source": [ + "text = \"Say this smoothly, to prove you are not a robot.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ab6fd6e9", + "metadata": {}, + "outputs": [], + "source": [ + "batches = tp.prepare_input_sequence([text], batch_size=1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "965a6363", + "metadata": {}, + "outputs": [], + "source": [ + "gen_kw = {'pace': 1.0,\n", + " 'speaker': 0,\n", + " 'pitch_tgt': None,\n", + " 'pitch_transform': None}\n", + "denoising_strength = 0.005" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ba2abe98", + "metadata": {}, + "outputs": [], + "source": [ + "for batch in batches:\n", + " with torch.no_grad():\n", + " mel, mel_lens, *_ = fastpitch(batch['text'].to(device), **gen_kw)\n", + " audios = hifigan(mel).float()\n", + " audios = denoiser(audios.squeeze(1), denoising_strength)\n", + " audios = audios.squeeze(1) * vocoder_train_setup['max_wav_value']\n" + ] + }, + { + "cell_type": "markdown", + "id": "3565039d", + "metadata": {}, + "source": [ + "Plot the intermediate spectorgram." 
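+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ad0c1264",
+   "metadata": {},
+   "source": [
+    "Before plotting, you can optionally confirm what the synthesis loop produced (not part of the original notebook). This sketch only uses the `mel` and `mel_lens` tensors from the cell above; the mel batch is expected to be laid out as batch x mel-bins x frames, which is also how the next cell indexes it for plotting."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ad1c1264",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional sketch: inspect the generated mel-spectrogram batch from the loop above.\n",
+    "print('mel shape:', tuple(mel.shape))\n",
+    "print('mel lengths per utterance:', mel_lens)"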
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1e33b882", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(10,12))\n", + "res_mel = mel[0].detach().cpu().numpy()\n", + "plt.imshow(res_mel, origin='lower')\n", + "plt.xlabel('time')\n", + "plt.ylabel('frequency')\n", + "_=plt.title('Spectrogram')" + ] + }, + { + "cell_type": "markdown", + "id": "bda6e7b8", + "metadata": {}, + "source": [ + "Syntesize audio." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f11f19fb", + "metadata": {}, + "outputs": [], + "source": [ + "audio_numpy = audios[0].cpu().numpy()\n", + "Audio(audio_numpy, rate=22050)" + ] + }, + { + "cell_type": "markdown", + "id": "e2c09135", + "metadata": {}, + "source": [ + "Write audio to wav file." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "45e8eaec", + "metadata": {}, + "outputs": [], + "source": [ + "from scipy.io.wavfile import write\n", + "write(\"audio.wav\", vocoder_train_setup['sampling_rate'], audio_numpy)" + ] + }, + { + "cell_type": "markdown", + "id": "de106a80", + "metadata": {}, + "source": [ + "### Details\n", + "For detailed information on model input and output, training recipies, inference and performance visit: [github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/HiFiGAN) and/or [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/resources/hifigan_pyt)\n", + "\n", + "### References\n", + "\n", + " - [HiFi-GAN: Generative Adversarial Networks for Efficient and High Fidelity Speech Synthesis](https://arxiv.org/abs/2010.05646)\n", + " - [Original implementation](https://github.com/jik876/hifi-gan)\n", + " - [FastPitch on NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/resources/fastpitch_pyt)\n", + " - [HiFi-GAN on NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/resources/hifigan_pyt)\n", + " - [FastPitch and HiFi-GAN on github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/HiFi-GAN)" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/nvidia_deeplearningexamples_resnet50.ipynb b/assets/hub/nvidia_deeplearningexamples_resnet50.ipynb new file mode 100644 index 000000000000..098e8f67f919 --- /dev/null +++ b/assets/hub/nvidia_deeplearningexamples_resnet50.ipynb @@ -0,0 +1,192 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "649b2722", + "metadata": {}, + "source": [ + "### This notebook requires a GPU runtime to run.\n", + "### Please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# ResNet50\n", + "\n", + "*Author: NVIDIA*\n", + "\n", + "**ResNet50 model trained with mixed precision using Tensor Cores.**\n", + "\n", + "\"alt\"\n", + "\n", + "\n", + "### Model Description\n", + "\n", + "The **_ResNet50 v1.5_** model is a modified version of the [original ResNet50 v1 model](https://arxiv.org/abs/1512.03385).\n", + "\n", + "The difference between v1 and v1.5 is that, in the bottleneck blocks which requires\n", + "downsampling, v1 has stride = 2 in the first 1x1 convolution, whereas v1.5 has stride = 2 in the 3x3 convolution.\n", + "\n", + "This difference makes ResNet50 v1.5 slightly more accurate (\\~0.5% top1) than v1, but comes with a small performance drawback (\\~5% imgs/sec).\n", + "\n", + "The model is initialized as described in 
[Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification](https://arxiv.org/pdf/1502.01852.pdf)\n", + "\n", + "This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results over 2x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.\n", + "\n", + "Note that the ResNet50 v1.5 model can be deployed for inference on the [NVIDIA Triton Inference Server](https://github.com/triton-inference-server/server) using TorchScript, ONNX Runtime or TensorRT as an execution backend. For details check [NGC](https://ngc.nvidia.com/catalog/resources/nvidia:resnet_for_triton_from_pytorch)\n", + "\n", + "### Example\n", + "\n", + "In the example below we will use the pretrained **_ResNet50 v1.5_** model to perform inference on **_image_** and present the result.\n", + "\n", + "To run the example you need some extra python packages installed. These are needed for preprocessing images and visualization." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "551937f6", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install validators matplotlib" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b226f9f5", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from PIL import Image\n", + "import torchvision.transforms as transforms\n", + "import numpy as np\n", + "import json\n", + "import requests\n", + "import matplotlib.pyplot as plt\n", + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "%matplotlib inline\n", + "\n", + "device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n", + "print(f'Using {device} for inference')" + ] + }, + { + "cell_type": "markdown", + "id": "7dabfd00", + "metadata": {}, + "source": [ + "Load the model pretrained on ImageNet dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "365f3b57", + "metadata": {}, + "outputs": [], + "source": [ + "resnet50 = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_resnet50', pretrained=True)\n", + "utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_convnets_processing_utils')\n", + "\n", + "resnet50.eval().to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "865710ba", + "metadata": {}, + "source": [ + "Prepare sample input data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17de2abf", + "metadata": {}, + "outputs": [], + "source": [ + "uris = [\n", + " 'http://images.cocodataset.org/test-stuff2017/000000024309.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000028117.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000006149.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000004954.jpg',\n", + "]\n", + "\n", + "batch = torch.cat(\n", + " [utils.prepare_input_from_uri(uri) for uri in uris]\n", + ").to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "8bc5aef9", + "metadata": {}, + "source": [ + "Run inference. Use `pick_n_best(predictions=output, n=topN)` helper function to pick N most probably hypothesis according to the model." 
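+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ad0c1266",
+   "metadata": {},
+   "source": [
+    "Before calling the model, an optional check (not part of the original notebook): inspect the batch assembled above. `utils.prepare_input_from_uri` returns one preprocessed image tensor per URL, concatenated along the batch dimension, so the first dimension should equal the number of URLs."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ad1c1266",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional sketch: the batch should hold one preprocessed image per URL listed above.\n",
+    "print('input batch shape:', tuple(batch.shape))\n",
+    "print('input batch device:', batch.device)"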
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "882c0864", + "metadata": {}, + "outputs": [], + "source": [ + "with torch.no_grad():\n", + " output = torch.nn.functional.softmax(resnet50(batch), dim=1)\n", + "\n", + "results = utils.pick_n_best(predictions=output, n=5)" + ] + }, + { + "cell_type": "markdown", + "id": "96d79cac", + "metadata": {}, + "source": [ + "Display the result." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2f47d573", + "metadata": {}, + "outputs": [], + "source": [ + "for uri, result in zip(uris, results):\n", + " img = Image.open(requests.get(uri, stream=True).raw)\n", + " img.thumbnail((256,256), Image.LANCZOS)\n", + " plt.imshow(img)\n", + " plt.show()\n", + " print(result)\n" + ] + }, + { + "cell_type": "markdown", + "id": "b2403e4f", + "metadata": {}, + "source": [ + "### Details\n", + "\n", + "For detailed information on model input and output, training recipies, inference and performance visit:\n", + "[github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/resnet50v1.5)\n", + "and/or [NGC](https://ngc.nvidia.com/catalog/resources/nvidia:resnet_50_v1_5_for_pytorch)\n", + "\n", + "### References\n", + "\n", + "- [Original ResNet50 v1 paper](https://arxiv.org/abs/1512.03385)\n", + "- [Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification](https://arxiv.org/pdf/1502.01852.pdf)\n", + "- [model on github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/resnet50v1.5)\n", + "- [model on NGC](https://ngc.nvidia.com/catalog/resources/nvidia:resnet_50_v1_5_for_pytorch)\n", + "- [pretrained model on NGC](https://ngc.nvidia.com/catalog/models/nvidia:resnet50_pyt_amp)" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/nvidia_deeplearningexamples_resnext.ipynb b/assets/hub/nvidia_deeplearningexamples_resnext.ipynb new file mode 100644 index 000000000000..348805a46179 --- /dev/null +++ b/assets/hub/nvidia_deeplearningexamples_resnext.ipynb @@ -0,0 +1,201 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "82092ede", + "metadata": {}, + "source": [ + "### This notebook requires a GPU runtime to run.\n", + "### Please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# ResNeXt101\n", + "\n", + "*Author: NVIDIA*\n", + "\n", + "**ResNet with bottleneck 3x3 Convolutions substituted by 3x3 Grouped Convolutions, trained with mixed precision using Tensor Cores.**\n", + "\n", + "_ | _\n", + "- | -\n", + "![alt](https://pytorch.org/assets/images/ResNeXtArch.png) | ![alt](https://pytorch.org/assets/images/classification.jpg)\n", + "\n", + "\n", + "\n", + "### Model Description\n", + "\n", + "The ***ResNeXt101-32x4d*** is a model introduced in the [Aggregated Residual Transformations for Deep Neural Networks](https://arxiv.org/pdf/1611.05431.pdf) paper.\n", + "\n", + "It is based on regular ResNet model, substituting 3x3 convolutions inside the bottleneck block for 3x3 grouped convolutions.\n", + "\n", + "This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 3x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. 
This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.\n", + "\n", + "We use [NHWC data layout](https://pytorch.org/tutorials/intermediate/memory_format_tutorial.html) when training using Mixed Precision.\n", + "\n", + "Note that the ResNeXt101-32x4d model can be deployed for inference on the [NVIDIA Triton Inference Server](https://github.com/triton-inference-server/server) using TorchScript, ONNX Runtime or TensorRT as an execution backend. For details check [NGC](https://ngc.nvidia.com/catalog/resources/nvidia:resnext_for_triton_from_pytorch)\n", + "\n", + "#### Model architecture\n", + "\n", + "![ResNextArch](https://pytorch.org/assets/images/ResNeXtArch.png)\n", + "\n", + "_Image source: [Aggregated Residual Transformations for Deep Neural Networks](https://arxiv.org/pdf/1611.05431.pdf)_\n", + "\n", + "Image shows difference between ResNet bottleneck block and ResNeXt bottleneck block.\n", + "\n", + "ResNeXt101-32x4d model's cardinality equals to 32 and bottleneck width equals to 4.\n", + "### Example\n", + "\n", + "In the example below we will use the pretrained ***ResNeXt101-32x4d*** model to perform inference on images and present the result.\n", + "\n", + "To run the example you need some extra python packages installed. These are needed for preprocessing images and visualization." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8ad70a6f", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install validators matplotlib" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f10cc82e", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from PIL import Image\n", + "import torchvision.transforms as transforms\n", + "import numpy as np\n", + "import json\n", + "import requests\n", + "import matplotlib.pyplot as plt\n", + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "%matplotlib inline\n", + "\n", + "device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n", + "print(f'Using {device} for inference')" + ] + }, + { + "cell_type": "markdown", + "id": "61269919", + "metadata": {}, + "source": [ + "Load the model pretrained on ImageNet dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "46242671", + "metadata": {}, + "outputs": [], + "source": [ + "resneXt = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_resneXt')\n", + "utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_convnets_processing_utils')\n", + "\n", + "resneXt.eval().to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "e0667f07", + "metadata": {}, + "source": [ + "Prepare sample input data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6892409b", + "metadata": {}, + "outputs": [], + "source": [ + "uris = [\n", + " 'http://images.cocodataset.org/test-stuff2017/000000024309.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000028117.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000006149.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000004954.jpg',\n", + "]\n", + "\n", + "\n", + "batch = torch.cat(\n", + " [utils.prepare_input_from_uri(uri) for uri in uris]\n", + ").to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "036bea69", + "metadata": {}, + "source": [ + "Run inference. 
Use `pick_n_best(predictions=output, n=topN)` helper function to pick N most probably hypothesis according to the model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c633b6d0", + "metadata": {}, + "outputs": [], + "source": [ + "with torch.no_grad():\n", + " output = torch.nn.functional.softmax(resneXt(batch), dim=1)\n", + " \n", + "results = utils.pick_n_best(predictions=output, n=5)" + ] + }, + { + "cell_type": "markdown", + "id": "fb708853", + "metadata": {}, + "source": [ + "Display the result." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bced97e2", + "metadata": {}, + "outputs": [], + "source": [ + "for uri, result in zip(uris, results):\n", + " img = Image.open(requests.get(uri, stream=True).raw)\n", + " img.thumbnail((256,256), Image.ANTIALIAS)\n", + " plt.imshow(img)\n", + " plt.show()\n", + " print(result)\n" + ] + }, + { + "cell_type": "markdown", + "id": "abbe3cae", + "metadata": {}, + "source": [ + "### Details\n", + "For detailed information on model input and output, training recipies, inference and performance visit:\n", + "[github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/resnext101-32x4d)\n", + "and/or [NGC](https://ngc.nvidia.com/catalog/resources/nvidia:resnext_for_pytorch)\n", + "\n", + "\n", + "### References\n", + "\n", + " - [Aggregated Residual Transformations for Deep Neural Networks](https://arxiv.org/pdf/1611.05431.pdf)\n", + " - [model on github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/resnext101-32x4d)\n", + " - [model on NGC](https://ngc.nvidia.com/catalog/resources/nvidia:resnext_for_pytorch)\n", + " - [pretrained model on NGC](https://ngc.nvidia.com/catalog/models/nvidia:resnext101_32x4d_pyt_amp)" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/nvidia_deeplearningexamples_se-resnext.ipynb b/assets/hub/nvidia_deeplearningexamples_se-resnext.ipynb new file mode 100644 index 000000000000..11d73edaa76c --- /dev/null +++ b/assets/hub/nvidia_deeplearningexamples_se-resnext.ipynb @@ -0,0 +1,201 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "cb67ab5c", + "metadata": {}, + "source": [ + "### This notebook requires a GPU runtime to run.\n", + "### Please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# SE-ResNeXt101\n", + "\n", + "*Author: NVIDIA*\n", + "\n", + "**ResNeXt with Squeeze-and-Excitation module added, trained with mixed precision using Tensor Cores.**\n", + "\n", + "_ | _\n", + "- | -\n", + "![alt](https://pytorch.org/assets/images/SEArch.png) | ![alt](https://pytorch.org/assets/images/classification.jpg)\n", + "\n", + "\n", + "\n", + "### Model Description\n", + "\n", + "The ***SE-ResNeXt101-32x4d*** is a [ResNeXt101-32x4d](https://arxiv.org/pdf/1611.05431.pdf)\n", + "model with added Squeeze-and-Excitation module introduced\n", + "in the [Squeeze-and-Excitation Networks](https://arxiv.org/pdf/1709.01507.pdf) paper.\n", + "\n", + "This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 3x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. 
This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.\n", + "\n", + "We use [NHWC data layout](https://pytorch.org/tutorials/intermediate/memory_format_tutorial.html) when training using Mixed Precision.\n", + "\n", + "#### Model architecture\n", + "\n", + "![SEArch](https://pytorch.org/assets/images/SEArch.png)\n", + "\n", + "_Image source: [Squeeze-and-Excitation Networks](https://arxiv.org/pdf/1709.01507.pdf)_\n", + "\n", + "Image shows the architecture of SE block and where is it placed in ResNet bottleneck block.\n", + "\n", + "\n", + "Note that the SE-ResNeXt101-32x4d model can be deployed for inference on the [NVIDIA Triton Inference Server](https://github.com/triton-inference-server/server) using TorchScript, ONNX Runtime or TensorRT as an execution backend. For details check [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/resources/se_resnext_for_triton_from_pytorch).\n", + "\n", + "### Example\n", + "\n", + "In the example below we will use the pretrained ***SE-ResNeXt101-32x4d*** model to perform inference on images and present the result.\n", + "\n", + "To run the example you need some extra python packages installed. These are needed for preprocessing images and visualization." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d2f3d7e9", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install validators matplotlib" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f9859a1c", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "from PIL import Image\n", + "import torchvision.transforms as transforms\n", + "import numpy as np\n", + "import json\n", + "import requests\n", + "import matplotlib.pyplot as plt\n", + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "%matplotlib inline\n", + "\n", + "device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n", + "print(f'Using {device} for inference')" + ] + }, + { + "cell_type": "markdown", + "id": "c2ab83ac", + "metadata": {}, + "source": [ + "Load the model pretrained on ImageNet dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5818ec83", + "metadata": {}, + "outputs": [], + "source": [ + "resneXt = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_se_resnext101_32x4d')\n", + "utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_convnets_processing_utils')\n", + "\n", + "resneXt.eval().to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "17bffa3e", + "metadata": {}, + "source": [ + "Prepare sample input data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "992d0ac8", + "metadata": {}, + "outputs": [], + "source": [ + "uris = [\n", + " 'http://images.cocodataset.org/test-stuff2017/000000024309.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000028117.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000006149.jpg',\n", + " 'http://images.cocodataset.org/test-stuff2017/000000004954.jpg',\n", + "]\n", + "\n", + "\n", + "batch = torch.cat(\n", + " [utils.prepare_input_from_uri(uri) for uri in uris]\n", + ").to(device)" + ] + }, + { + "cell_type": "markdown", + "id": "8edab49f", + "metadata": {}, + "source": [ + "Run inference. Use `pick_n_best(predictions=output, n=topN)` helper function to pick N most probable hypotheses according to the model." 
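+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ad0c1270",
+   "metadata": {},
+   "source": [
+    "As an optional extra (not part of the original notebook), you can also get a rough wall-clock timing of one forward pass over the prepared batch. `torch.cuda.synchronize()` is called only when a GPU is available, because CUDA kernels run asynchronously; the measured time is indicative only."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ad1c1270",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import time\n",
+    "\n",
+    "# Optional sketch: rough timing of a single forward pass over `batch` from above.\n",
+    "if torch.cuda.is_available():\n",
+    "    torch.cuda.synchronize()\n",
+    "start = time.time()\n",
+    "with torch.no_grad():\n",
+    "    _ = resneXt(batch)\n",
+    "if torch.cuda.is_available():\n",
+    "    torch.cuda.synchronize()\n",
+    "print(f'one forward pass took {time.time() - start:.3f} s')"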
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "71272f7a", + "metadata": {}, + "outputs": [], + "source": [ + "with torch.no_grad():\n", + " output = torch.nn.functional.softmax(resneXt(batch), dim=1)\n", + " \n", + "results = utils.pick_n_best(predictions=output, n=5)" + ] + }, + { + "cell_type": "markdown", + "id": "37c5fa6b", + "metadata": {}, + "source": [ + "Display the result." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3ae2509b", + "metadata": {}, + "outputs": [], + "source": [ + "for uri, result in zip(uris, results):\n", + " img = Image.open(requests.get(uri, stream=True).raw)\n", + " img.thumbnail((256,256), Image.ANTIALIAS)\n", + " plt.imshow(img)\n", + " plt.show()\n", + " print(result)\n" + ] + }, + { + "cell_type": "markdown", + "id": "7e11165d", + "metadata": {}, + "source": [ + "### Details\n", + "For detailed information on model input and output, training recipies, inference and performance visit:\n", + "[github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/se-resnext101-32x4d)\n", + "and/or [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/resources/se_resnext_for_pytorch).\n", + "\n", + "\n", + "### References\n", + "\n", + " - [Squeeze-and-Excitation Networks](https://arxiv.org/pdf/1709.01507.pdf)\n", + " - [model on github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/se-resnext101-32x4d)\n", + " - [model on NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/resources/se_resnext_for_pytorch)\n", + " - [pretrained model on NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/models/seresnext101_32x4d_pyt_amp)" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/nvidia_deeplearningexamples_ssd.ipynb b/assets/hub/nvidia_deeplearningexamples_ssd.ipynb index e91c207b5f59..69982e208582 100644 --- a/assets/hub/nvidia_deeplearningexamples_ssd.ipynb +++ b/assets/hub/nvidia_deeplearningexamples_ssd.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "723a4209", "metadata": {}, "source": [ "### This notebook requires a GPU runtime to run.\n", @@ -17,28 +18,7 @@ "\n", "_ | _\n", "- | -\n", - "![alt](https://pytorch.org/assets/images/ssd_diagram.png) | ![alt](https://pytorch.org/assets/images/ssd.png)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import torch \n", - "precision = 'fp32'\n", - "ssd_model = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd', model_math=precision)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "will load an SSD model pretrained on COCO dataset from Torch Hub.\n", - "\n", - "Setting precision='fp16' will load a checkpoint trained with [mixed precision](https://arxiv.org/abs/1710.03740) into architecture enabling execution on [Tensor Cores](https://developer.nvidia.com/tensor-cores).\n", - "Handling mixed precision data requires [Apex](https://github.com/NVIDIA/apex) library.\n", + "![alt](https://pytorch.org/assets/images/ssd_diagram.png) | ![alt](https://pytorch.org/assets/images/ssd.png)\n", "\n", "\n", "\n", @@ -56,7 +36,7 @@ "[Speed/accuracy trade-offs for modern convolutional object detectors](https://arxiv.org/abs/1611.10012)\n", "paper, the following enhancements were made to the backbone:\n", "* The conv5_x, avgpool, fc and softmax layers were removed from the original classification model.\n", - "* All strides in conv4_x are set to 1x1. 
\n", + "* All strides in conv4_x are set to 1x1.\n", "\n", "The backbone is followed by 5 additional convolutional layers.\n", "In addition to the convolutional layers, we attached 6 detection heads:\n", @@ -68,15 +48,15 @@ "\n", "### Example\n", "\n", - "In the example below we will use the pretrained SSD model loaded from Torch Hub to detect objects in sample images and visualize the result.\n", + "In the example below we will use the pretrained SSD model to detect objects in sample images and visualize the result.\n", "\n", - "To run the example you need some extra python packages installed.\n", - "These are needed for preprocessing images and visualization." + "To run the example you need some extra python packages installed. These are needed for preprocessing images and visualization." ] }, { "cell_type": "code", "execution_count": null, + "id": "866aaca3", "metadata": {}, "outputs": [], "source": [ @@ -86,22 +66,27 @@ }, { "cell_type": "markdown", + "id": "01a1d68e", "metadata": {}, "source": [ - "For convenient and comprehensive formatting of input and output of the model, load a set of utility methods." + "Load an SSD model pretrained on COCO dataset, as well as a set of utility methods for convenient and comprehensive formatting of input and output of the model." ] }, { "cell_type": "code", "execution_count": null, + "id": "921e5bc9", "metadata": {}, "outputs": [], "source": [ + "import torch\n", + "ssd_model = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd')\n", "utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd_processing_utils')" ] }, { "cell_type": "markdown", + "id": "5edbf8de", "metadata": {}, "source": [ "Now, prepare the loaded model for inference" @@ -110,6 +95,7 @@ { "cell_type": "code", "execution_count": null, + "id": "05cd687d", "metadata": {}, "outputs": [], "source": [ @@ -119,6 +105,7 @@ }, { "cell_type": "markdown", + "id": "5d0466ad", "metadata": {}, "source": [ "Prepare input images for object detection.\n", @@ -128,6 +115,7 @@ { "cell_type": "code", "execution_count": null, + "id": "31fe8862", "metadata": {}, "outputs": [], "source": [ @@ -140,6 +128,7 @@ }, { "cell_type": "markdown", + "id": "43cdba2e", "metadata": {}, "source": [ "Format the images to comply with the network input and convert them to tensor." @@ -148,15 +137,17 @@ { "cell_type": "code", "execution_count": null, + "id": "802ac664", "metadata": {}, "outputs": [], "source": [ "inputs = [utils.prepare_input(uri) for uri in uris]\n", - "tensor = utils.prepare_tensor(inputs, precision == 'fp16')" + "tensor = utils.prepare_tensor(inputs)" ] }, { "cell_type": "markdown", + "id": "ce59371f", "metadata": {}, "source": [ "Run the SSD network to perform object detection." 
@@ -165,6 +156,7 @@ { "cell_type": "code", "execution_count": null, + "id": "e973afd2", "metadata": {}, "outputs": [], "source": [ @@ -174,6 +166,7 @@ }, { "cell_type": "markdown", + "id": "4d2e73e3", "metadata": {}, "source": [ "By default, raw output from SSD network per input image contains\n", @@ -184,6 +177,7 @@ { "cell_type": "code", "execution_count": null, + "id": "091a6726", "metadata": {}, "outputs": [], "source": [ @@ -193,6 +187,7 @@ }, { "cell_type": "markdown", + "id": "083f378b", "metadata": {}, "source": [ "The model was trained on COCO dataset, which we need to access in order to translate class IDs into object names.\n", @@ -202,6 +197,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a972bb87", "metadata": {}, "outputs": [], "source": [ @@ -210,6 +206,7 @@ }, { "cell_type": "markdown", + "id": "628d6fb3", "metadata": {}, "source": [ "Finally, let's visualize our detections" @@ -218,6 +215,7 @@ { "cell_type": "code", "execution_count": null, + "id": "92c04db6", "metadata": {}, "outputs": [], "source": [ @@ -242,24 +240,25 @@ }, { "cell_type": "markdown", + "id": "0b95e054", "metadata": {}, "source": [ "### Details\n", - "For detailed information on model input and output, \n", - "training recipies, inference and performance visit: \n", - "[github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Detection/SSD) \n", - "and/or [NGC](https://ngc.nvidia.com/catalog/model-scripts/nvidia:ssd_for_pytorch)\n", + "For detailed information on model input and output,\n", + "training recipies, inference and performance visit:\n", + "[github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Detection/SSD)\n", + "and/or [NGC](https://ngc.nvidia.com/catalog/resources/nvidia:ssd_for_pytorch)\n", "\n", "### References\n", "\n", " - [SSD: Single Shot MultiBox Detector](https://arxiv.org/abs/1512.02325) paper\n", " - [Speed/accuracy trade-offs for modern convolutional object detectors](https://arxiv.org/abs/1611.10012) paper\n", - " - [SSD on NGC](https://ngc.nvidia.com/catalog/model-scripts/nvidia:ssd_for_pytorch)\n", + " - [SSD on NGC](https://ngc.nvidia.com/catalog/resources/nvidia:ssd_for_pytorch)\n", " - [SSD on github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Detection/SSD)" ] } ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/nvidia_deeplearningexamples_tacotron2.ipynb b/assets/hub/nvidia_deeplearningexamples_tacotron2.ipynb index 8528a37e7cf7..dde2eb226e9f 100644 --- a/assets/hub/nvidia_deeplearningexamples_tacotron2.ipynb +++ b/assets/hub/nvidia_deeplearningexamples_tacotron2.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "ff815dde", "metadata": {}, "source": [ "### This notebook requires a GPU runtime to run.\n", @@ -15,24 +16,9 @@ "\n", "**The Tacotron 2 model for generating mel spectrograms from text**\n", "\n", - "\"alt\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "tacotron2 = torch.hub.load('nvidia/DeepLearningExamples:torchhub', 'nvidia_tacotron2')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "will load the Tacotron2 model pre-trained on [LJ Speech dataset](https://keithito.com/LJ-Speech-Dataset/)\n", + "\"alt\"\n", + "\n", + "\n", "\n", "### Model Description\n", "\n", @@ -44,7 +30,7 @@ "\n", "In the example below:\n", "- pretrained Tacotron2 and Waveglow models are loaded from torch.hub\n", - "- Tacotron2 
generates mel spectrogram given tensor represantation of an input text (\"Hello world, I missed you\")\n", + "- Given a tensor representation of the input text (\"Hello world, I missed you so much\"), Tacotron2 generates a Mel spectrogram as shown on the illustration\n", "- Waveglow generates sound given the mel spectrogram\n", "- the output sound is saved in an 'audio.wav' file\n", "\n", @@ -55,95 +41,112 @@ { "cell_type": "code", "execution_count": null, + "id": "7f6ac940", "metadata": {}, "outputs": [], "source": [ "%%bash\n", - "pip install numpy scipy librosa unidecode inflect librosa" + "pip install numpy scipy librosa unidecode inflect librosa\n", + "apt-get update\n", + "apt-get install -y libsndfile1" + ] + }, + { + "cell_type": "markdown", + "id": "d3612c17", + "metadata": {}, + "source": [ + "Load the Tacotron2 model pre-trained on [LJ Speech dataset](https://keithito.com/LJ-Speech-Dataset/) and prepare it for inference:" ] }, { "cell_type": "code", "execution_count": null, + "id": "d4a1a023", "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", - "from scipy.io.wavfile import write" + "import torch\n", + "tacotron2 = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_tacotron2', model_math='fp16')\n", + "tacotron2 = tacotron2.to('cuda')\n", + "tacotron2.eval()" ] }, { "cell_type": "markdown", + "id": "75fae8d2", "metadata": {}, "source": [ - "Prepare tacotron2 for inference" + "Load pretrained WaveGlow model" ] }, { "cell_type": "code", "execution_count": null, + "id": "58efde1d", "metadata": {}, "outputs": [], "source": [ - "tacotron2 = tacotron2.to('cuda')\n", - "tacotron2.eval()" + "waveglow = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_waveglow', model_math='fp16')\n", + "waveglow = waveglow.remove_weightnorm(waveglow)\n", + "waveglow = waveglow.to('cuda')\n", + "waveglow.eval()" ] }, { "cell_type": "markdown", + "id": "7ef68115", "metadata": {}, "source": [ - "Load waveglow from PyTorch Hub" + "Now, let's make the model say:" ] }, { "cell_type": "code", "execution_count": null, + "id": "be6c6e1a", "metadata": {}, "outputs": [], "source": [ - "waveglow = torch.hub.load('nvidia/DeepLearningExamples:torchhub', 'nvidia_waveglow')\n", - "waveglow = waveglow.remove_weightnorm(waveglow)\n", - "waveglow = waveglow.to('cuda')\n", - "waveglow.eval()" + "text = \"Hello world, I missed you so much.\"" ] }, { "cell_type": "markdown", + "id": "dceb771a", "metadata": {}, "source": [ - "Now, let's make the model say *\"hello world, I missed you\"*" + "Format the input using utility methods" ] }, { "cell_type": "code", "execution_count": null, + "id": "c24742bc", "metadata": {}, "outputs": [], "source": [ - "text = \"hello world, I missed you\"" + "utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_tts_utils')\n", + "sequences, lengths = utils.prepare_input_sequence([text])" ] }, { "cell_type": "markdown", + "id": "e625393c", "metadata": {}, "source": [ - "Now chain pre-processing -> tacotron2 -> waveglow" + "Run the chained models:" ] }, { "cell_type": "code", "execution_count": null, + "id": "fb4ee2dd", "metadata": {}, "outputs": [], "source": [ - "# preprocessing\n", - "sequence = np.array(tacotron2.text_to_sequence(text, ['english_cleaners']))[None, :]\n", - "sequence = torch.from_numpy(sequence).to(device='cuda', dtype=torch.int64)\n", - "\n", - "# run the models\n", "with torch.no_grad():\n", - " _, mel, _, _ = tacotron2.infer(sequence)\n", + " mel, _, _ = tacotron2.infer(sequences, lengths)\n", " audio = 
waveglow.infer(mel)\n", "audio_numpy = audio[0].data.cpu().numpy()\n", "rate = 22050" @@ -151,6 +154,7 @@ }, { "cell_type": "markdown", + "id": "8289c037", "metadata": {}, "source": [ "You can write it to a file and listen to it" @@ -159,14 +163,17 @@ { "cell_type": "code", "execution_count": null, + "id": "2f5bad6b", "metadata": {}, "outputs": [], "source": [ + "from scipy.io.wavfile import write\n", "write(\"audio.wav\", rate, audio_numpy)" ] }, { "cell_type": "markdown", + "id": "1c37ef75", "metadata": {}, "source": [ "Alternatively, play it right away in a notebook with IPython widgets" @@ -175,6 +182,7 @@ { "cell_type": "code", "execution_count": null, + "id": "a2664845", "metadata": {}, "outputs": [], "source": [ @@ -184,21 +192,22 @@ }, { "cell_type": "markdown", + "id": "163b4f0f", "metadata": {}, "source": [ "### Details\n", - "For detailed information on model input and output, training recipies, inference and performance visit: [github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/Tacotron2) and/or [NGC](https://ngc.nvidia.com/catalog/model-scripts/nvidia:tacotron_2_and_waveglow_for_pytorch)\n", + "For detailed information on model input and output, training recipies, inference and performance visit: [github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/Tacotron2) and/or [NGC](https://ngc.nvidia.com/catalog/resources/nvidia:tacotron_2_and_waveglow_for_pytorch)\n", "\n", "### References\n", "\n", " - [Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions](https://arxiv.org/abs/1712.05884)\n", " - [WaveGlow: A Flow-based Generative Network for Speech Synthesis](https://arxiv.org/abs/1811.00002)\n", - " - [Tacotron2 and WaveGlow on NGC](https://ngc.nvidia.com/catalog/model-scripts/nvidia:tacotron_2_and_waveglow_for_pytorch)\n", + " - [Tacotron2 and WaveGlow on NGC](https://ngc.nvidia.com/catalog/resources/nvidia:tacotron_2_and_waveglow_for_pytorch)\n", " - [Tacotron2 and Waveglow on github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/Tacotron2)" ] } ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/nvidia_deeplearningexamples_waveglow.ipynb b/assets/hub/nvidia_deeplearningexamples_waveglow.ipynb index db60fb8846b3..591fae1edaab 100644 --- a/assets/hub/nvidia_deeplearningexamples_waveglow.ipynb +++ b/assets/hub/nvidia_deeplearningexamples_waveglow.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "3b9d7786", "metadata": {}, "source": [ "### This notebook requires a GPU runtime to run.\n", @@ -15,24 +16,9 @@ "\n", "**WaveGlow model for generating speech from mel spectrograms (generated by Tacotron2)**\n", "\n", - "\"alt\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "waveglow = torch.hub.load('nvidia/DeepLearningExamples:torchhub', 'nvidia_waveglow')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "will load the WaveGlow model pre-trained on [LJ Speech dataset](https://keithito.com/LJ-Speech-Dataset/)\n", + "\"alt\"\n", + "\n", + "\n", "\n", "### Model Description\n", "\n", @@ -42,7 +28,7 @@ "\n", "In the example below:\n", "- pretrained Tacotron2 and Waveglow models are loaded from torch.hub\n", - "- Tacotron2 generates mel spectrogram given tensor represantation of an input text (\"Hello world, I missed you\")\n", + "- Given a tensor representation of the input 
text (\"Hello world, I missed you so much\"), Tacotron2 generates a Mel spectrogram as shown on the illustration\n", "- Waveglow generates sound given the mel spectrogram\n", "- the output sound is saved in an 'audio.wav' file\n", "\n", @@ -53,33 +39,47 @@ { "cell_type": "code", "execution_count": null, + "id": "5495a25d", "metadata": {}, "outputs": [], "source": [ "%%bash\n", - "pip install numpy scipy librosa unidecode inflect librosa" + "pip install numpy scipy librosa unidecode inflect librosa\n", + "apt-get update\n", + "apt-get install -y libsndfile1" + ] + }, + { + "cell_type": "markdown", + "id": "a69d8b25", + "metadata": {}, + "source": [ + "Load the WaveGlow model pre-trained on [LJ Speech dataset](https://keithito.com/LJ-Speech-Dataset/)" ] }, { "cell_type": "code", "execution_count": null, + "id": "bc4d5b04", "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", - "from scipy.io.wavfile import write" + "import torch\n", + "waveglow = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_waveglow', model_math='fp32')" ] }, { "cell_type": "markdown", + "id": "5216aae6", "metadata": {}, "source": [ - "Prepare the waveglow model for inference" + "Prepare the WaveGlow model for inference" ] }, { "cell_type": "code", "execution_count": null, + "id": "256825ab", "metadata": {}, "outputs": [], "source": [ @@ -90,58 +90,78 @@ }, { "cell_type": "markdown", + "id": "beb19d0d", "metadata": {}, "source": [ - "Load tacotron2 from PyTorch Hub" + "Load a pretrained Tacotron2 model" ] }, { "cell_type": "code", "execution_count": null, + "id": "0373466b", "metadata": {}, "outputs": [], "source": [ - "tacotron2 = torch.hub.load('nvidia/DeepLearningExamples:torchhub', 'nvidia_tacotron2')\n", + "tacotron2 = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_tacotron2', model_math='fp32')\n", "tacotron2 = tacotron2.to('cuda')\n", "tacotron2.eval()" ] }, { "cell_type": "markdown", + "id": "396732b6", "metadata": {}, "source": [ - "Now, let's make the model say *\"hello world, I missed you\"*" + "Now, let's make the model say:" ] }, { "cell_type": "code", "execution_count": null, + "id": "6d41eac9", "metadata": {}, "outputs": [], "source": [ - "text = \"hello world, I missed you\"" + "text = \"hello world, I missed you so much\"" ] }, { "cell_type": "markdown", + "id": "cf568cae", "metadata": {}, "source": [ - "Now chain pre-processing -> tacotron2 -> waveglow" + "Format the input using utility methods" ] }, { "cell_type": "code", "execution_count": null, + "id": "1ef1c9bc", + "metadata": {}, + "outputs": [], + "source": [ + "utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_tts_utils')\n", + "sequences, lengths = utils.prepare_input_sequence([text])" + ] + }, + { + "cell_type": "markdown", + "id": "1ef1d0b1", + "metadata": {}, + "source": [ + "Run the chained models" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "635a4285", "metadata": {}, "outputs": [], "source": [ - "# preprocessing\n", - "sequence = np.array(tacotron2.text_to_sequence(text, ['english_cleaners']))[None, :]\n", - "sequence = torch.from_numpy(sequence).to(device='cuda', dtype=torch.int64)\n", - "\n", - "# run the models\n", "with torch.no_grad():\n", - " _, mel, _, _ = tacotron2.infer(sequence)\n", + " mel, _, _ = tacotron2.infer(sequences, lengths)\n", " audio = waveglow.infer(mel)\n", "audio_numpy = audio[0].data.cpu().numpy()\n", "rate = 22050" @@ -149,6 +169,7 @@ }, { "cell_type": "markdown", + "id": "3fcbd8e4", "metadata": {}, "source": [ "You can 
write it to a file and listen to it" @@ -157,14 +178,17 @@ { "cell_type": "code", "execution_count": null, + "id": "7988fc2e", "metadata": {}, "outputs": [], "source": [ + "from scipy.io.wavfile import write\n", "write(\"audio.wav\", rate, audio_numpy)" ] }, { "cell_type": "markdown", + "id": "ca7f89f4", "metadata": {}, "source": [ "Alternatively, play it right away in a notebook with IPython widgets" @@ -173,6 +197,7 @@ { "cell_type": "code", "execution_count": null, + "id": "0bc20db9", "metadata": {}, "outputs": [], "source": [ @@ -182,21 +207,22 @@ }, { "cell_type": "markdown", + "id": "06e2be82", "metadata": {}, "source": [ "### Details\n", - "For detailed information on model input and output, training recipies, inference and performance visit: [github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/Tacotron2) and/or [NGC](https://ngc.nvidia.com/catalog/model-scripts/nvidia:tacotron_2_and_waveglow_for_pytorch)\n", + "For detailed information on model input and output, training recipies, inference and performance visit: [github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/Tacotron2) and/or [NGC](https://ngc.nvidia.com/catalog/resources/nvidia:tacotron_2_and_waveglow_for_pytorch)\n", "\n", "### References\n", "\n", " - [Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions](https://arxiv.org/abs/1712.05884)\n", " - [WaveGlow: A Flow-based Generative Network for Speech Synthesis](https://arxiv.org/abs/1811.00002)\n", - " - [Tacotron2 and WaveGlow on NGC](https://ngc.nvidia.com/catalog/model-scripts/nvidia:tacotron_2_and_waveglow_for_pytorch)\n", + " - [Tacotron2 and WaveGlow on NGC](https://ngc.nvidia.com/catalog/resources/nvidia:tacotron_2_and_waveglow_for_pytorch)\n", " - [Tacotron2 and Waveglow on github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/Tacotron2)" ] } ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_fairseq_roberta.ipynb b/assets/hub/pytorch_fairseq_roberta.ipynb index a2c972670663..51cec6fc83d5 100644 --- a/assets/hub/pytorch_fairseq_roberta.ipynb +++ b/assets/hub/pytorch_fairseq_roberta.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "b9b0f816", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -42,15 +43,17 @@ { "cell_type": "code", "execution_count": null, + "id": "41bba98b", "metadata": {}, "outputs": [], "source": [ "%%bash\n", - "pip install regex requests" + "pip install regex requests hydra-core omegaconf" ] }, { "cell_type": "markdown", + "id": "db7cc4bf", "metadata": {}, "source": [ "### Example\n", @@ -61,6 +64,7 @@ { "cell_type": "code", "execution_count": null, + "id": "69d7edfc", "metadata": {}, "outputs": [], "source": [ @@ -71,6 +75,7 @@ }, { "cell_type": "markdown", + "id": "69f67aa3", "metadata": {}, "source": [ "##### Apply Byte-Pair Encoding (BPE) to input text" @@ -79,6 +84,7 @@ { "cell_type": "code", "execution_count": null, + "id": "c95887a1", "metadata": {}, "outputs": [], "source": [ @@ -89,6 +95,7 @@ }, { "cell_type": "markdown", + "id": "f4a84c78", "metadata": {}, "source": [ "##### Extract features from RoBERTa" @@ -97,6 +104,7 @@ { "cell_type": "code", "execution_count": null, + "id": "43121292", "metadata": {}, "outputs": [], "source": [ @@ -112,6 +120,7 @@ }, { "cell_type": "markdown", + "id": "1b8987a7", "metadata": {}, "source": [ "##### Use RoBERTa for sentence-pair 
classification tasks" @@ -120,6 +129,7 @@ { "cell_type": "code", "execution_count": null, + "id": "db44c214", "metadata": {}, "outputs": [], "source": [ @@ -141,6 +151,7 @@ }, { "cell_type": "markdown", + "id": "048210ab", "metadata": {}, "source": [ "##### Register a new (randomly initialized) classification head" @@ -149,6 +160,7 @@ { "cell_type": "code", "execution_count": null, + "id": "19fd2ac0", "metadata": {}, "outputs": [], "source": [ @@ -158,6 +170,7 @@ }, { "cell_type": "markdown", + "id": "2073f9ae", "metadata": {}, "source": [ "### References\n", @@ -173,5 +186,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_fairseq_translation.ipynb b/assets/hub/pytorch_fairseq_translation.ipynb index da8317fab5ee..23f7db42837c 100644 --- a/assets/hub/pytorch_fairseq_translation.ipynb +++ b/assets/hub/pytorch_fairseq_translation.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "f37758ff", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -36,15 +37,17 @@ { "cell_type": "code", "execution_count": null, + "id": "11f2a6df", "metadata": {}, "outputs": [], "source": [ "%%bash\n", - "pip install fastBPE regex requests sacremoses subword_nmt" + "pip install bitarray fastBPE hydra-core omegaconf regex requests sacremoses subword_nmt" ] }, { "cell_type": "markdown", + "id": "e34d32b3", "metadata": {}, "source": [ "### English-to-French Translation\n", @@ -56,6 +59,7 @@ { "cell_type": "code", "execution_count": null, + "id": "280bc67c", "metadata": {}, "outputs": [], "source": [ @@ -97,6 +101,7 @@ }, { "cell_type": "markdown", + "id": "f2db0246", "metadata": {}, "source": [ "### English-to-German Translation\n", @@ -118,6 +123,7 @@ { "cell_type": "code", "execution_count": null, + "id": "7a98737d", "metadata": {}, "outputs": [], "source": [ @@ -136,6 +142,7 @@ }, { "cell_type": "markdown", + "id": "318cd7df", "metadata": {}, "source": [ "We can also do a round-trip translation to create a paraphrase:" @@ -144,6 +151,7 @@ { "cell_type": "code", "execution_count": null, + "id": "1f32eb3e", "metadata": {}, "outputs": [], "source": [ @@ -164,6 +172,7 @@ }, { "cell_type": "markdown", + "id": "6af9d4be", "metadata": {}, "source": [ "### References\n", @@ -187,5 +196,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_alexnet.ipynb b/assets/hub/pytorch_vision_alexnet.ipynb index c7e09f189c56..66c33d54a2de 100644 --- a/assets/hub/pytorch_vision_alexnet.ipynb +++ b/assets/hub/pytorch_vision_alexnet.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "7692c1d7", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -23,16 +24,18 @@ { "cell_type": "code", "execution_count": null, + "id": "0ac00cc0", "metadata": {}, "outputs": [], "source": [ "import torch\n", - "model = torch.hub.load('pytorch/vision', 'alexnet', pretrained=True)\n", + "model = torch.hub.load('pytorch/vision:v0.10.0', 'alexnet', pretrained=True)\n", "model.eval()" ] }, { "cell_type": "markdown", + "id": "b183042d", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -46,12 +49,13 @@ { "cell_type": "code", "execution_count": null, + "id": "a746b62b", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = 
(\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -59,6 +63,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ef673c6e", "metadata": {}, "outputs": [], "source": [ @@ -82,25 +87,54 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. To get probabilities, you can run a softmax on it.\n", - "print(torch.nn.functional.softmax(output[0], dim=0))\n" + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "40120b08", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "031215d4", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" ] }, { "cell_type": "markdown", + "id": "805798b3", "metadata": {}, "source": [ "### Model Description\n", "\n", "AlexNet competed in the ImageNet Large Scale Visual Recognition Challenge on September 30, 2012. The network achieved a top-5 error of 15.3%, more than 10.8 percentage points lower than that of the runner up. 
The original paper's primary result was that the depth of the model was essential for its high performance, which was computationally expensive, but made feasible due to the utilization of graphics processing units (GPUs) during training.\n", "\n", - "The 1-crop error rates on the imagenet dataset with the pretrained model are listed below.\n", + "The 1-crop error rates on the ImageNet dataset with the pretrained model are listed below.\n", "\n", "| Model structure | Top-1 error | Top-5 error |\n", "| --------------- | ----------- | ----------- |\n", - "| alexnet | 43.45 | 20.91 |\n", + "| AlexNet | 43.45 | 20.91 |\n", "\n", "### References\n", "\n", @@ -110,5 +144,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_deeplabv3_resnet101.ipynb b/assets/hub/pytorch_vision_deeplabv3_resnet101.ipynb index cc73b4ab4a19..27cfb279187e 100644 --- a/assets/hub/pytorch_vision_deeplabv3_resnet101.ipynb +++ b/assets/hub/pytorch_vision_deeplabv3_resnet101.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "81a63efd", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -9,11 +10,11 @@ "\n", "----------------------------------------------------------------------\n", "\n", - "# Deeplabv3-ResNet101\n", + "# Deeplabv3\n", "\n", "*Author: Pytorch Team*\n", "\n", - "**DeepLabV3 model with a ResNet-101 backbone**\n", + "**DeepLabV3 models with ResNet-50, ResNet-101 and MobileNet-V3 backbones**\n", "\n", "_ | _\n", "- | -\n", @@ -23,16 +24,21 @@ { "cell_type": "code", "execution_count": null, + "id": "e1918486", "metadata": {}, "outputs": [], "source": [ "import torch\n", - "model = torch.hub.load('pytorch/vision', 'deeplabv3_resnet101', pretrained=True)\n", + "model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_resnet50', pretrained=True)\n", + "# or any of these variants\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_resnet101', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_mobilenet_v3_large', pretrained=True)\n", "model.eval()" ] }, { "cell_type": "markdown", + "id": "3cdf09ab", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -41,19 +47,20 @@ "and `std = [0.229, 0.224, 0.225]`.\n", "\n", "The model returns an `OrderedDict` with two Tensors that are of the same height and width as the input Tensor, but with 21 classes.\n", - "`output['out']` contains the semantic masks, and `output['aux']` contains the auxillary loss values per-pixel. In inference mode, `output['aux']` is not useful.\n", - "So, `output['out']` is of shape `(N, 21, H, W)`. More documentation can be found [here](https://pytorch.org/docs/stable/torchvision/models.html#object-detection-instance-segmentation-and-person-keypoint-detection)." + "`output['out']` contains the semantic masks, and `output['aux']` contains the auxiliary loss values per-pixel. In inference mode, `output['aux']` is not useful.\n", + "So, `output['out']` is of shape `(N, 21, H, W)`. More documentation can be found [here](https://pytorch.org/vision/stable/models.html#semantic-segmentation)." 
] }, { "cell_type": "code", "execution_count": null, + "id": "4721ab36", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/deeplab1.png\", \"deeplab1.png\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -61,6 +68,7 @@ { "cell_type": "code", "execution_count": null, + "id": "206c642b", "metadata": {}, "outputs": [], "source": [ @@ -68,6 +76,7 @@ "from PIL import Image\n", "from torchvision import transforms\n", "input_image = Image.open(filename)\n", + "input_image = input_image.convert(\"RGB\")\n", "preprocess = transforms.Compose([\n", " transforms.ToTensor(),\n", " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n", @@ -88,9 +97,10 @@ }, { "cell_type": "markdown", + "id": "694f815f", "metadata": {}, "source": [ - "The output here is of shape `(21, H, W)`, and at each location, there are unnormalized proababilities corresponding to the prediction of each class.\n", + "The output here is of shape `(21, H, W)`, and at each location, there are unnormalized probabilities corresponding to the prediction of each class.\n", "To get the maximum prediction of each class, and then use it for a downstream task, you can do `output_predictions = output.argmax(0)`.\n", "\n", "Here's a small snippet that plots the predictions, with each color being assigned to each class (see the visualized image on the left)." @@ -99,6 +109,7 @@ { "cell_type": "code", "execution_count": null, + "id": "fad7c908", "metadata": {}, "outputs": [], "source": [ @@ -118,18 +129,22 @@ }, { "cell_type": "markdown", + "id": "a719981d", "metadata": {}, "source": [ "### Model Description\n", "\n", - "Deeplabv3-ResNet101 is contructed by a Deeplabv3 model with a ResNet-101 backbone.\n", + "Deeplabv3-ResNet is constructed by a Deeplabv3 model using a ResNet-50 or ResNet-101 backbone.\n", + "Deeplabv3-MobileNetV3-Large is constructed by a Deeplabv3 model using the MobileNetV3 large backbone.\n", "The pre-trained model has been trained on a subset of COCO train2017, on the 20 categories that are present in the Pascal VOC dataset.\n", "\n", "Their accuracies of the pre-trained models evaluated on COCO val2017 dataset are listed below.\n", "\n", - "| Model structure | Mean IOU | Global Pixelwise Accuracy |\n", - "| ------------------- | ----------- | --------------------------|\n", - "| deeplabv3_resnet101 | 67.4 | 92.4 |\n", + "| Model structure | Mean IOU | Global Pixelwise Accuracy |\n", + "| ---------------------------- | ----------- | --------------------------|\n", + "| deeplabv3_resnet50 | 66.4 | 92.4 |\n", + "| deeplabv3_resnet101 | 67.4 | 92.4 |\n", + "| deeplabv3_mobilenet_v3_large | 60.3 | 91.2 |\n", "\n", "### Resources\n", "\n", @@ -139,5 +154,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_densenet.ipynb b/assets/hub/pytorch_vision_densenet.ipynb index 60e947b2ef22..17228a6ed557 100644 --- a/assets/hub/pytorch_vision_densenet.ipynb +++ b/assets/hub/pytorch_vision_densenet.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "68899af4", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -23,20 +24,22 @@ { "cell_type": "code", "execution_count": null, + "id": 
"4763cda8", "metadata": {}, "outputs": [], "source": [ "import torch\n", - "model = torch.hub.load('pytorch/vision', 'densenet121', pretrained=True)\n", + "model = torch.hub.load('pytorch/vision:v0.10.0', 'densenet121', pretrained=True)\n", "# or any of these variants\n", - "# model = torch.hub.load('pytorch/vision', 'densenet169', pretrained=True)\n", - "# model = torch.hub.load('pytorch/vision', 'densenet201', pretrained=True)\n", - "# model = torch.hub.load('pytorch/vision', 'densenet161', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'densenet169', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'densenet201', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'densenet161', pretrained=True)\n", "model.eval()" ] }, { "cell_type": "markdown", + "id": "2c853c4e", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -50,12 +53,13 @@ { "cell_type": "code", "execution_count": null, + "id": "a81d3849", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -63,6 +67,7 @@ { "cell_type": "code", "execution_count": null, + "id": "ee9cba07", "metadata": {}, "outputs": [], "source": [ @@ -86,21 +91,50 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. To get probabilities, you can run a softmax on it.\n", - "print(torch.nn.functional.softmax(output[0], dim=0))\n" + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "74a06535", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "294e623e", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" ] }, { "cell_type": "markdown", + "id": "301bc281", "metadata": {}, "source": [ "### Model Description\n", "\n", "Dense Convolutional Network (DenseNet), connects each layer to every other layer in a feed-forward fashion. Whereas traditional convolutional networks with L layers have L connections - one between each layer and its subsequent layer - our network has L(L+1)/2 direct connections. For each layer, the feature-maps of all preceding layers are used as inputs, and its own feature-maps are used as inputs into all subsequent layers. 
DenseNets have several compelling advantages: they alleviate the vanishing-gradient problem, strengthen feature propagation, encourage feature reuse, and substantially reduce the number of parameters.\n", "\n", - "The 1-crop error rates on the imagenet dataset with the pretrained model are listed below.\n", + "The 1-crop error rates on the ImageNet dataset with the pretrained model are listed below.\n", "\n", "| Model structure | Top-1 error | Top-5 error |\n", "| --------------- | ----------- | ----------- |\n", @@ -117,5 +151,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_fcn_resnet101.ipynb b/assets/hub/pytorch_vision_fcn_resnet101.ipynb index c29506225a9b..a0dd2bf34975 100644 --- a/assets/hub/pytorch_vision_fcn_resnet101.ipynb +++ b/assets/hub/pytorch_vision_fcn_resnet101.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "fd19b580", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -9,11 +10,11 @@ "\n", "----------------------------------------------------------------------\n", "\n", - "# FCN-ResNet101\n", + "# FCN\n", "\n", "*Author: Pytorch Team*\n", "\n", - "**Fully-Convolutional Network model with a ResNet-101 backbone**\n", + "**Fully-Convolutional Network model with ResNet-50 and ResNet-101 backbones**\n", "\n", "_ | _\n", "- | -\n", @@ -23,16 +24,20 @@ { "cell_type": "code", "execution_count": null, + "id": "f13bbd6c", "metadata": {}, "outputs": [], "source": [ "import torch\n", - "model = torch.hub.load('pytorch/vision', 'fcn_resnet101', pretrained=True)\n", + "model = torch.hub.load('pytorch/vision:v0.10.0', 'fcn_resnet50', pretrained=True)\n", + "# or\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'fcn_resnet101', pretrained=True)\n", "model.eval()" ] }, { "cell_type": "markdown", + "id": "ff156752", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -48,12 +53,13 @@ { "cell_type": "code", "execution_count": null, + "id": "8beb09de", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/deeplab1.png\", \"deeplab1.png\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -61,6 +67,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3d4d387c", "metadata": {}, "outputs": [], "source": [ @@ -68,6 +75,7 @@ "from PIL import Image\n", "from torchvision import transforms\n", "input_image = Image.open(filename)\n", + "input_image = input_image.convert(\"RGB\")\n", "preprocess = transforms.Compose([\n", " transforms.ToTensor(),\n", " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n", @@ -88,9 +96,10 @@ }, { "cell_type": "markdown", + "id": "ecd9dc2a", "metadata": {}, "source": [ - "The output here is of shape `(21, H, W)`, and at each location, there are unnormalized proababilities corresponding to the prediction of each class.\n", + "The output here is of shape `(21, H, W)`, and at each location, there are unnormalized probabilities corresponding to the prediction of each class.\n", "To get the maximum prediction of each class, and then use it for a downstream task, you can do `output_predictions = output.argmax(0)`.\n", "\n", "Here's a small snippet 
that plots the predictions, with each color being assigned to each class (see the visualized image on the left)." @@ -99,6 +108,7 @@ { "cell_type": "code", "execution_count": null, + "id": "6e96363d", "metadata": {}, "outputs": [], "source": [ @@ -118,17 +128,19 @@ }, { "cell_type": "markdown", + "id": "5c13de1f", "metadata": {}, "source": [ "### Model Description\n", "\n", - "FCN-ResNet101 is contructed by a Fully-Covolutional Network model with a ResNet-101 backbone.\n", + "FCN-ResNet is constructed by a Fully-Convolutional Network model, using a ResNet-50 or a ResNet-101 backbone.\n", "The pre-trained models have been trained on a subset of COCO train2017, on the 20 categories that are present in the Pascal VOC dataset.\n", "\n", "Their accuracies of the pre-trained models evaluated on COCO val2017 dataset are listed below.\n", "\n", "| Model structure | Mean IOU | Global Pixelwise Accuracy |\n", "| --------------- | ----------- | --------------------------|\n", + "| fcn_resnet50 | 60.5 | 91.4 |\n", "| fcn_resnet101 | 63.7 | 91.9 |\n", "\n", "### Resources\n", @@ -139,5 +151,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_ghostnet.ipynb b/assets/hub/pytorch_vision_ghostnet.ipynb new file mode 100644 index 000000000000..a4201dd70ad7 --- /dev/null +++ b/assets/hub/pytorch_vision_ghostnet.ipynb @@ -0,0 +1,154 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "5419967e", + "metadata": {}, + "source": [ + "### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# GhostNet\n", + "\n", + "*Author: Huawei Noah's Ark Lab*\n", + "\n", + "**Efficient networks by generating more features from cheap operations**\n", + "\n", + "\"alt\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5bfc8236", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "model = torch.hub.load('huawei-noah/ghostnet', 'ghostnet_1x', pretrained=True)\n", + "model.eval()" + ] + }, + { + "cell_type": "markdown", + "id": "29885b09", + "metadata": {}, + "source": [ + "All pre-trained models expect input images normalized in the same way,\n", + "i.e. mini-batches of 3-channel RGB images of shape `(3 x H x W)`, where `H` and `W` are expected to be at least `224`.\n", + "The images have to be loaded in to a range of `[0, 1]` and then normalized using `mean = [0.485, 0.456, 0.406]`\n", + "and `std = [0.229, 0.224, 0.225]`.\n", + "\n", + "Here's a sample execution." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3e09d4c9", + "metadata": {}, + "outputs": [], + "source": [ + "# Download an example image from the pytorch website\n", + "import urllib\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", + "try: urllib.URLopener().retrieve(url, filename)\n", + "except: urllib.request.urlretrieve(url, filename)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e0f35591", + "metadata": {}, + "outputs": [], + "source": [ + "# sample execution (requires torchvision)\n", + "from PIL import Image\n", + "from torchvision import transforms\n", + "input_image = Image.open(filename)\n", + "preprocess = transforms.Compose([\n", + " transforms.Resize(256),\n", + " transforms.CenterCrop(224),\n", + " transforms.ToTensor(),\n", + " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n", + "])\n", + "input_tensor = preprocess(input_image)\n", + "input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model\n", + "\n", + "# move the input and model to GPU for speed if available\n", + "if torch.cuda.is_available():\n", + " input_batch = input_batch.to('cuda')\n", + " model.to('cuda')\n", + "\n", + "with torch.no_grad():\n", + " output = model(input_batch)\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", + "print(output[0])\n", + "# The output has unnormalized scores. To get probabilities, you can run a softmax on it.\n", + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "97348852", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4554be1c", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" + ] + }, + { + "cell_type": "markdown", + "id": "aa1eafb9", + "metadata": {}, + "source": [ + "### Model Description\n", + "\n", + "The GhostNet architecture is based on an Ghost module structure which generate more features from cheap operations. Based on a set of intrinsic feature maps, a series of cheap operations are applied to generate many ghost feature maps that could fully reveal information underlying intrinsic features. 
Experiments conducted on benchmarks demonstrate that the superiority of GhostNet in terms of speed and accuracy tradeoff.\n", + "\n", + "The corresponding accuracy on ImageNet dataset with pretrained model is listed below.\n", + "\n", + "| Model structure | FLOPs | Top-1 acc | Top-5 acc |\n", + "| --------------- | ----------- | ----------- | ----------- |\n", + "| GhostNet 1.0x | 142M | 73.98 | 91.46 |\n", + "\n", + "\n", + "### References\n", + "\n", + "You can read the full paper at this [link](https://arxiv.org/abs/1911.11907).\n", + "\n", + ">@inproceedings{han2019ghostnet,\n", + "> title={GhostNet: More Features from Cheap Operations},\n", + "> author={Kai Han and Yunhe Wang and Qi Tian and Jianyuan Guo and Chunjing Xu and Chang Xu},\n", + "> booktitle={CVPR},\n", + "> year={2020},\n", + ">}" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/pytorch_vision_googlenet.ipynb b/assets/hub/pytorch_vision_googlenet.ipynb index 0bc6484dec47..835331c610a7 100644 --- a/assets/hub/pytorch_vision_googlenet.ipynb +++ b/assets/hub/pytorch_vision_googlenet.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "1969f331", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -23,16 +24,18 @@ { "cell_type": "code", "execution_count": null, + "id": "de65b5c3", "metadata": {}, "outputs": [], "source": [ "import torch\n", - "model = torch.hub.load('pytorch/vision', 'googlenet', pretrained=True)\n", + "model = torch.hub.load('pytorch/vision:v0.10.0', 'googlenet', pretrained=True)\n", "model.eval()" ] }, { "cell_type": "markdown", + "id": "2af024dd", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -46,12 +49,13 @@ { "cell_type": "code", "execution_count": null, + "id": "0128a2ed", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -59,6 +63,7 @@ { "cell_type": "code", "execution_count": null, + "id": "3b66b181", "metadata": {}, "outputs": [], "source": [ @@ -82,14 +87,43 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. 
To get probabilities, you can run a softmax on it.\n", - "print(torch.nn.functional.softmax(output[0], dim=0))\n" + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2b0cced5", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15d04883", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" ] }, { "cell_type": "markdown", + "id": "c5b0bd52", "metadata": {}, "source": [ "### Model Description\n", @@ -110,5 +144,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_hardnet.ipynb b/assets/hub/pytorch_vision_hardnet.ipynb index 7c83db46f2ed..9ad7325c28a6 100644 --- a/assets/hub/pytorch_vision_hardnet.ipynb +++ b/assets/hub/pytorch_vision_hardnet.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "ffdd445b", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -23,6 +24,7 @@ { "cell_type": "code", "execution_count": null, + "id": "46ba8187", "metadata": {}, "outputs": [], "source": [ @@ -37,6 +39,7 @@ }, { "cell_type": "markdown", + "id": "b64975bc", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -50,12 +53,13 @@ { "cell_type": "code", "execution_count": null, + "id": "05a7120f", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -63,6 +67,7 @@ { "cell_type": "code", "execution_count": null, + "id": "6b3e79b5", "metadata": {}, "outputs": [], "source": [ @@ -86,14 +91,43 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. 
To get probabilities, you can run a softmax on it.\n", - "print(torch.nn.functional.softmax(output[0], dim=0))\n" + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "be14d6a6", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "97409a94", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" ] }, { "cell_type": "markdown", + "id": "fe532a5e", "metadata": {}, "source": [ "### Model Description\n", @@ -106,7 +140,7 @@ "\n", "Here we have the 4 versions of hardnet models, which contains 39, 68, 85 layers\n", "w/ or w/o Depthwise Separable Conv respectively.\n", - "Their 1-crop error rates on imagenet dataset with pretrained models are listed below.\n", + "Their 1-crop error rates on ImageNet dataset with pretrained models are listed below.\n", "\n", "| Model structure | Top-1 error | Top-5 error |\n", "| --------------- | ----------- | ----------- |\n", @@ -123,5 +157,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_ibnnet.ipynb b/assets/hub/pytorch_vision_ibnnet.ipynb new file mode 100644 index 000000000000..f76d32a653ed --- /dev/null +++ b/assets/hub/pytorch_vision_ibnnet.ipynb @@ -0,0 +1,164 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0be70a35", + "metadata": {}, + "source": [ + "### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# IBN-Net\n", + "\n", + "*Author: Xingang Pan*\n", + "\n", + "**Networks with domain/appearance invariance**\n", + "\n", + "\"alt\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f92977b3", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "model = torch.hub.load('XingangPan/IBN-Net', 'resnet50_ibn_a', pretrained=True)\n", + "model.eval()" + ] + }, + { + "cell_type": "markdown", + "id": "8db6eceb", + "metadata": {}, + "source": [ + "All pre-trained models expect input images normalized in the same way,\n", + "i.e. mini-batches of 3-channel RGB images of shape `(3 x H x W)`, where `H` and `W` are expected to be at least `224`.\n", + "The images have to be loaded in to a range of `[0, 1]` and then normalized using `mean = [0.485, 0.456, 0.406]`\n", + "and `std = [0.229, 0.224, 0.225]`.\n", + "\n", + "Here's a sample execution." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5d7b978c", + "metadata": {}, + "outputs": [], + "source": [ + "# Download an example image from the pytorch website\n", + "import urllib\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", + "try: urllib.URLopener().retrieve(url, filename)\n", + "except: urllib.request.urlretrieve(url, filename)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c642dd5f", + "metadata": {}, + "outputs": [], + "source": [ + "# sample execution (requires torchvision)\n", + "from PIL import Image\n", + "from torchvision import transforms\n", + "input_image = Image.open(filename)\n", + "preprocess = transforms.Compose([\n", + " transforms.Resize(256),\n", + " transforms.CenterCrop(224),\n", + " transforms.ToTensor(),\n", + " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n", + "])\n", + "input_tensor = preprocess(input_image)\n", + "input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model\n", + "\n", + "# move the input and model to GPU for speed if available\n", + "if torch.cuda.is_available():\n", + " input_batch = input_batch.to('cuda')\n", + " model.to('cuda')\n", + "\n", + "with torch.no_grad():\n", + " output = model(input_batch)\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", + "print(output[0])\n", + "# The output has unnormalized scores. To get probabilities, you can run a softmax on it.\n", + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b54afb2a", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b181b3c4", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" + ] + }, + { + "cell_type": "markdown", + "id": "b72c668a", + "metadata": {}, + "source": [ + "### Model Description\n", + "\n", + "IBN-Net is a CNN model with domain/appearance invariance.\n", + "Motivated by style transfer works, IBN-Net carefully unifies instance normalization and batch normalization in a single deep network.\n", + "It provides a simple way to increase both modeling and generalization capacities without adding model complexity.\n", + "IBN-Net is especially suitable for cross domain or person/vehicle re-identification tasks.\n", + "\n", + "The corresponding accuracies on ImageNet dataset with pretrained models are listed below.\n", + "\n", + "| Model name | Top-1 acc | Top-5 acc |\n", + "| --------------- | ----------- | ----------- |\n", + "| resnet50_ibn_a | 77.46 | 93.68 |\n", + "| resnet101_ibn_a | 78.61 | 94.41 |\n", + "| resnext101_ibn_a | 79.12 | 94.58 |\n", + "| se_resnet101_ibn_a | 78.75 | 94.49 |\n", + "\n", + "The rank1/mAP on two Re-ID benchmarks Market1501 and DukeMTMC-reID are listed below (from [michuanhaohao/reid-strong-baseline](https://github.com/michuanhaohao/reid-strong-baseline)).\n", + "\n", + "| Backbone | Market1501 | DukeMTMC-reID 
|\n", + "| --- | -- | -- |\n", + "| ResNet50 | 94.5 (85.9) | 86.4 (76.4) |\n", + "| ResNet101 | 94.5 (87.1) | 87.6 (77.6) |\n", + "| SeResNet50 | 94.4 (86.3) | 86.4 (76.5) |\n", + "| SeResNet101 | 94.6 (87.3) | 87.5 (78.0) |\n", + "| SeResNeXt50 | 94.9 (87.6) | 88.0 (78.3) |\n", + "| SeResNeXt101 | 95.0 (88.0) | 88.4 (79.0) |\n", + "| ResNet50-IBN-a | 95.0 (88.2) | 90.1 (79.1) |\n", + "\n", + "### References\n", + "\n", + " - [Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net](https://arxiv.org/abs/1807.09441)" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/pytorch_vision_inception_v3.ipynb b/assets/hub/pytorch_vision_inception_v3.ipynb index a90c6c0785ce..e78a62bb41b5 100644 --- a/assets/hub/pytorch_vision_inception_v3.ipynb +++ b/assets/hub/pytorch_vision_inception_v3.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "3aefb0c8", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -13,7 +14,7 @@ "\n", "*Author: Pytorch Team*\n", "\n", - "**Also called GoogleNetv3, a famous ConvNet trained on Imagenet from 2015**\n", + "**Also called GoogleNetv3, a famous ConvNet trained on ImageNet from 2015**\n", "\n", "\"alt\"" ] @@ -21,16 +22,18 @@ { "cell_type": "code", "execution_count": null, + "id": "30ef872a", "metadata": {}, "outputs": [], "source": [ "import torch\n", - "model = torch.hub.load('pytorch/vision', 'inception_v3', pretrained=True)\n", + "model = torch.hub.load('pytorch/vision:v0.10.0', 'inception_v3', pretrained=True)\n", "model.eval()" ] }, { "cell_type": "markdown", + "id": "908b3755", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -44,12 +47,13 @@ { "cell_type": "code", "execution_count": null, + "id": "988336ea", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -57,6 +61,7 @@ { "cell_type": "code", "execution_count": null, + "id": "9cad110b", "metadata": {}, "outputs": [], "source": [ @@ -80,21 +85,50 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. 
To get probabilities, you can run a softmax on it.\n", - "print(torch.nn.functional.softmax(output[0], dim=0))\n" + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17c2dede", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d9e739aa", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" ] }, { "cell_type": "markdown", + "id": "fcee2231", "metadata": {}, "source": [ "### Model Description\n", "\n", "Inception v3: Based on the exploration of ways to scale up networks in ways that aim at utilizing the added computation as efficiently as possible by suitably factorized convolutions and aggressive regularization. We benchmark our methods on the ILSVRC 2012 classification challenge validation set demonstrate substantial gains over the state of the art: 21.2% top-1 and 5.6% top-5 error for single frame evaluation using a network with a computational cost of 5 billion multiply-adds per inference and with using less than 25 million parameters. With an ensemble of 4 models and multi-crop evaluation, we report 3.5% top-5 error on the validation set (3.6% error on the test set) and 17.3% top-1 error on the validation set.\n", "\n", - "The 1-crop error rates on the imagenet dataset with the pretrained model are listed below.\n", + "The 1-crop error rates on the ImageNet dataset with the pretrained model are listed below.\n", "\n", "| Model structure | Top-1 error | Top-5 error |\n", "| --------------- | ----------- | ----------- |\n", @@ -108,5 +142,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_meal_v2.ipynb b/assets/hub/pytorch_vision_meal_v2.ipynb new file mode 100644 index 000000000000..6e0f749e2479 --- /dev/null +++ b/assets/hub/pytorch_vision_meal_v2.ipynb @@ -0,0 +1,199 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "10350fa6", + "metadata": {}, + "source": [ + "### This notebook requires a GPU runtime to run.\n", + "### Please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# MEAL_V2\n", + "\n", + "*Author: Carnegie Mellon University*\n", + "\n", + "**Boosting Tiny and Efficient Models using Knowledge Distillation.**\n", + "\n", + "_ | _\n", + "- | -\n", + "![alt](https://pytorch.org/assets/images/MEALV2_method.png) | ![alt](https://pytorch.org/assets/images/MEALV2_results.png)\n", + "\n", + "\n", + "We require one additional Python dependency" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1cd0a471", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "!pip install timm" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "738af1be", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "# list of models: 'mealv1_resnest50', 
'mealv2_resnest50', 'mealv2_resnest50_cutmix', 'mealv2_resnest50_380x380', 'mealv2_mobilenetv3_small_075', 'mealv2_mobilenetv3_small_100', 'mealv2_mobilenet_v3_large_100', 'mealv2_efficientnet_b0'\n", + "# load pretrained models, using \"mealv2_resnest50_cutmix\" as an example\n", + "model = torch.hub.load('szq0214/MEAL-V2','meal_v2', 'mealv2_resnest50_cutmix', pretrained=True)\n", + "model.eval()" + ] + }, + { + "cell_type": "markdown", + "id": "03479891", + "metadata": {}, + "source": [ + "All pre-trained models expect input images normalized in the same way,\n", + "i.e. mini-batches of 3-channel RGB images of shape `(3 x H x W)`, where `H` and `W` are expected to be at least `224`.\n", + "The images have to be loaded in to a range of `[0, 1]` and then normalized using `mean = [0.485, 0.456, 0.406]`\n", + "and `std = [0.229, 0.224, 0.225]`.\n", + "\n", + "Here's a sample execution." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ef3fb94b", + "metadata": {}, + "outputs": [], + "source": [ + "# Download an example image from the pytorch website\n", + "import urllib\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", + "try: urllib.URLopener().retrieve(url, filename)\n", + "except: urllib.request.urlretrieve(url, filename)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "50e222ff", + "metadata": {}, + "outputs": [], + "source": [ + "# sample execution (requires torchvision)\n", + "from PIL import Image\n", + "from torchvision import transforms\n", + "input_image = Image.open(filename)\n", + "preprocess = transforms.Compose([\n", + " transforms.Resize(256),\n", + " transforms.CenterCrop(224),\n", + " transforms.ToTensor(),\n", + " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n", + "])\n", + "input_tensor = preprocess(input_image)\n", + "input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model\n", + "\n", + "# move the input and model to GPU for speed if available\n", + "if torch.cuda.is_available():\n", + " input_batch = input_batch.to('cuda')\n", + " model.to('cuda')\n", + "\n", + "with torch.no_grad():\n", + " output = model(input_batch)\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", + "print(output[0])\n", + "# The output has unnormalized scores. 
To get probabilities, you can run a softmax on it.\n", + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "005e7308", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27fefc5f", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" + ] + }, + { + "cell_type": "markdown", + "id": "77cda746", + "metadata": {}, + "source": [ + "### Model Description\n", + "\n", + "MEAL V2 models are from the [MEAL V2: Boosting Vanilla ResNet-50 to 80%+ Top-1 Accuracy on ImageNet without Tricks](https://arxiv.org/pdf/2009.08453.pdf) paper.\n", + "\n", + "In this paper, we introduce a simple yet effective approach that can boost the vanilla ResNet-50 to 80%+ Top-1 accuracy on ImageNet without any tricks. Generally, our method is based on the recently proposed [MEAL](https://arxiv.org/abs/1812.02425), i.e., ensemble knowledge distillation via discriminators. We further simplify it through 1) adopting the similarity loss and discriminator only on the final outputs and 2) using the average of softmax probabilities from all teacher ensembles as the stronger supervision for distillation. One crucial perspective of our method is that the one-hot/hard label should not be used in the distillation process. We show that such a simple framework can achieve state-of-the-art results without involving any commonly-used tricks, such as 1) architecture modification; 2) outside training data beyond ImageNet; 3) autoaug/randaug; 4) cosine learning rate; 5) mixup/cutmix training; 6) label smoothing; etc.\n", + "\n", + "| Models | Resolution| #Parameters | Top-1/Top-5 |\n", + "| :---: | :-: | :-: | :------:| :------: | \n", + "| [MEAL-V1 w/ ResNet50](https://arxiv.org/abs/1812.02425) | 224 | 25.6M |**78.21/94.01** | [GitHub](https://github.com/AaronHeee/MEAL#imagenet-model) |\n", + "| MEAL-V2 w/ ResNet50 | 224 | 25.6M | **80.67/95.09** | \n", + "| MEAL-V2 w/ ResNet50| 380 | 25.6M | **81.72/95.81** | \n", + "| MEAL-V2 + CutMix w/ ResNet50| 224 | 25.6M | **80.98/95.35** | \n", + "| MEAL-V2 w/ MobileNet V3-Small 0.75| 224 | 2.04M | **67.60/87.23** | \n", + "| MEAL-V2 w/ MobileNet V3-Small 1.0| 224 | 2.54M | **69.65/88.71** | \n", + "| MEAL-V2 w/ MobileNet V3-Large 1.0 | 224 | 5.48M | **76.92/93.32** | \n", + "| MEAL-V2 w/ EfficientNet-B0| 224 | 5.29M | **78.29/93.95** | \n", + "\n", + "### References\n", + "\n", + "Please refer to our papers [MEAL V2](https://arxiv.org/pdf/2009.08453.pdf), [MEAL](https://arxiv.org/pdf/1812.02425.pdf) for more details." 
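To make the distillation recipe described above concrete, here is a minimal, self-contained sketch of the soft-label loss it implies: the softmax outputs of several teachers are averaged and the student is trained against that averaged distribution with a KL-divergence loss, with no one-hot/hard labels involved. This is only an illustration of the idea, not the MEAL V2 code: the discriminator term is omitted and the model/tensor names are placeholders.

```python
import torch
import torch.nn.functional as F

def soft_label_distillation_loss(student_logits, teacher_logits_list):
    # Average the softmax probabilities of all teacher ensembles -> soft labels
    teacher_probs = torch.stack(
        [F.softmax(t, dim=1) for t in teacher_logits_list], dim=0
    ).mean(dim=0)
    # KL divergence between the student's log-probabilities and the averaged
    # teacher distribution; no one-hot/hard labels are used anywhere.
    student_log_probs = F.log_softmax(student_logits, dim=1)
    return F.kl_div(student_log_probs, teacher_probs, reduction="batchmean")

# Toy check with random logits for a batch of 8 images over 1000 classes
student_logits = torch.randn(8, 1000)
teacher_logits_list = [torch.randn(8, 1000) for _ in range(3)]
print(soft_label_distillation_loss(student_logits, teacher_logits_list).item())
```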
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "07a1b5e2", + "metadata": {}, + "outputs": [], + "source": [ + "@article{shen2020mealv2,\n", + " title={MEAL V2: Boosting Vanilla ResNet-50 to 80%+ Top-1 Accuracy on ImageNet without Tricks},\n", + " author={Shen, Zhiqiang and Savvides, Marios},\n", + " journal={arXiv preprint arXiv:2009.08453},\n", + " year={2020}\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "d9792ede", + "metadata": {}, + "source": [ + "@inproceedings{shen2019MEAL,\n", + "\t\ttitle = {MEAL: Multi-Model Ensemble via Adversarial Learning},\n", + "\t\tauthor = {Shen, Zhiqiang and He, Zhankui and Xue, Xiangyang},\n", + "\t\tbooktitle = {AAAI},\n", + "\t\tyear = {2019}\n", + "\t}" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/pytorch_vision_mobilenet_v2.ipynb b/assets/hub/pytorch_vision_mobilenet_v2.ipynb index 977b2835883a..0c4f292588f4 100644 --- a/assets/hub/pytorch_vision_mobilenet_v2.ipynb +++ b/assets/hub/pytorch_vision_mobilenet_v2.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "2fdbaf2a", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -23,16 +24,18 @@ { "cell_type": "code", "execution_count": null, + "id": "4c34bc45", "metadata": {}, "outputs": [], "source": [ "import torch\n", - "model = torch.hub.load('pytorch/vision', 'mobilenet_v2', pretrained=True)\n", + "model = torch.hub.load('pytorch/vision:v0.10.0', 'mobilenet_v2', pretrained=True)\n", "model.eval()" ] }, { "cell_type": "markdown", + "id": "8ff4161f", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -46,12 +49,13 @@ { "cell_type": "code", "execution_count": null, + "id": "d1082309", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -59,6 +63,7 @@ { "cell_type": "code", "execution_count": null, + "id": "7cf3aa09", "metadata": {}, "outputs": [], "source": [ @@ -82,14 +87,43 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. 
To get probabilities, you can run a softmax on it.\n", - "print(torch.nn.functional.softmax(output[0], dim=0))\n" + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14d5e43d", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "129b008f", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" ] }, { "cell_type": "markdown", + "id": "77311437", "metadata": {}, "source": [ "### Model Description\n", @@ -109,5 +143,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_once_for_all.ipynb b/assets/hub/pytorch_vision_once_for_all.ipynb new file mode 100644 index 000000000000..b6f92dbd89ba --- /dev/null +++ b/assets/hub/pytorch_vision_once_for_all.ipynb @@ -0,0 +1,210 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "f3757c21", + "metadata": {}, + "source": [ + "### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# Once-for-All\n", + "\n", + "*Author: MIT Han Lab*\n", + "\n", + "**Once-for-all (OFA) decouples training and search, and achieves efficient inference across various edge devices and resource constraints.**\n", + "\n", + "\"alt\"\n", + "\n", + "\n", + "\n", + "\n", + "### Get supernet\n", + "\n", + "You can quickly load a supernet as following" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3bc7dc84", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "super_net_name = \"ofa_supernet_mbv3_w10\" \n", + "# other options: \n", + "# ofa_supernet_resnet50 / \n", + "# ofa_supernet_mbv3_w12 / \n", + "# ofa_supernet_proxyless\n", + "\n", + "super_net = torch.hub.load('mit-han-lab/once-for-all', super_net_name, pretrained=True).eval()" + ] + }, + { + "cell_type": "markdown", + "id": "4319718f", + "metadata": {}, + "source": [ + "| OFA Network | Design Space | Resolution | Width Multiplier | Depth | Expand Ratio | kernel Size | \n", + "|----------------------|----------|----------|---------|------------|---------|------------|\n", + "| ofa_resnet50 | ResNet50D | 128 - 224 | 0.65, 0.8, 1.0 | 0, 1, 2 | 0.2, 0.25, 0.35 | 3 |\n", + "| ofa_mbv3_d234_e346_k357_w1.0 | MobileNetV3 | 128 - 224 | 1.0 | 2, 3, 4 | 3, 4, 6 | 3, 5, 7 |\n", + "| ofa_mbv3_d234_e346_k357_w1.2 | MobileNetV3 | 160 - 224 | 1.2 | 2, 3, 4 | 3, 4, 6 | 3, 5, 7 |\n", + "| ofa_proxyless_d234_e346_k357_w1.3 | ProxylessNAS | 128 - 224 | 1.3 | 2, 3, 4 | 3, 4, 6 | 3, 5, 7 |\n", + "\n", + "\n", + "Below are the usage of sampling / selecting a subnet from the supernet" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "463fb4c2", + "metadata": {}, + "outputs": [], + "source": [ + "# 
Randomly sample sub-networks from OFA network\n", + "super_net.sample_active_subnet()\n", + "random_subnet = super_net.get_active_subnet(preserve_weight=True)\n", + " \n", + "# Manually set the sub-network\n", + "super_net.set_active_subnet(ks=7, e=6, d=4)\n", + "manual_subnet = super_net.get_active_subnet(preserve_weight=True)" + ] + }, + { + "cell_type": "markdown", + "id": "e097dc4e", + "metadata": {}, + "source": [ + "### Get Specialized Architecture" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eab014b1", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "\n", + "# or load an architecture specialized for a certain platform\n", + "net_config = \"resnet50D_MAC_4_1B\"\n", + "\n", + "specialized_net, image_size = torch.hub.load('mit-han-lab/once-for-all', net_config, pretrained=True)\n", + "specialized_net.eval()" + ] + }, + { + "cell_type": "markdown", + "id": "421f2d0e", + "metadata": {}, + "source": [ + "More models and configurations can be found in [once-for-all/model-zoo](https://github.com/mit-han-lab/once-for-all#evaluate-1)\n", + "and obtained through the following scripts" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "55a329a7", + "metadata": {}, + "outputs": [], + "source": [ + "ofa_specialized_get = torch.hub.load('mit-han-lab/once-for-all', \"ofa_specialized_get\")\n", + "model, image_size = ofa_specialized_get(\"flops@595M_top1@80.0_finetune@75\", pretrained=True)\n", + "model.eval()" + ] + }, + { + "cell_type": "markdown", + "id": "95c0d1b2", + "metadata": {}, + "source": [ + "The model's prediction can be evaluated by" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c61486e1", + "metadata": {}, + "outputs": [], + "source": [ + "# Download an example image from the pytorch website\n", + "import urllib\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", + "try: \n", + " urllib.URLopener().retrieve(url, filename)\n", + "except: \n", + " urllib.request.urlretrieve(url, filename)\n", + "\n", + "\n", + "# sample execution (requires torchvision)\n", + "from PIL import Image\n", + "from torchvision import transforms\n", + "input_image = Image.open(filename)\n", + "preprocess = transforms.Compose([\n", + " transforms.Resize(256),\n", + " transforms.CenterCrop(224),\n", + " transforms.ToTensor(),\n", + " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n", + "])\n", + "input_tensor = preprocess(input_image)\n", + "input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model\n", + "\n", + "# move the input and model to GPU for speed if available\n", + "if torch.cuda.is_available():\n", + " input_batch = input_batch.to('cuda')\n", + " model.to('cuda')\n", + "\n", + "with torch.no_grad():\n", + " output = model(input_batch)\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", + "print(output[0])\n", + "# The output has unnormalized scores. To get probabilities, you can run a softmax on it.\n", + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)\n" + ] + }, + { + "cell_type": "markdown", + "id": "e1d3b367", + "metadata": {}, + "source": [ + "### Model Description\n", + "Once-for-all models are from [Once for All: Train One Network and Specialize it for Efficient Deployment](https://arxiv.org/abs/1908.09791). 
Conventional approaches either manually design or use neural architecture search (NAS) to find a specialized neural network and train it from scratch for each case, which is computationally prohibitive (causing CO2 emission as much as 5 cars' lifetime) thus unscalable. In this work, we propose to train a once-for-all (OFA) network that supports diverse architectural settings by decoupling training and search. Across diverse edge devices, OFA consistently outperforms state-of-the-art (SOTA) NAS methods (up to 4.0% ImageNet top1 accuracy improvement over MobileNetV3, or same accuracy but 1.5x faster than MobileNetV3, 2.6x faster than EfficientNet w.r.t measured latency) while reducing many orders of magnitude GPU hours and CO2 emission. In particular, OFA achieves a new SOTA 80.0% ImageNet top-1 accuracy under the mobile setting (<600M MACs).\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "### References" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9df3f022", + "metadata": {}, + "outputs": [], + "source": [ + "@inproceedings{\n", + " cai2020once,\n", + " title={Once for All: Train One Network and Specialize it for Efficient Deployment},\n", + " author={Han Cai and Chuang Gan and Tianzhe Wang and Zhekai Zhang and Song Han},\n", + " booktitle={International Conference on Learning Representations},\n", + " year={2020},\n", + " url={https://arxiv.org/pdf/1908.09791.pdf}\n", + "}" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/pytorch_vision_proxylessnas.ipynb b/assets/hub/pytorch_vision_proxylessnas.ipynb index d768796b6f4a..539b0dbf4e80 100644 --- a/assets/hub/pytorch_vision_proxylessnas.ipynb +++ b/assets/hub/pytorch_vision_proxylessnas.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "22347183", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -21,6 +22,7 @@ { "cell_type": "code", "execution_count": null, + "id": "626e5228", "metadata": {}, "outputs": [], "source": [ @@ -33,6 +35,7 @@ }, { "cell_type": "markdown", + "id": "01d12ab0", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -46,12 +49,13 @@ { "cell_type": "code", "execution_count": null, + "id": "0fb2b20d", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -59,6 +63,7 @@ { "cell_type": "code", "execution_count": null, + "id": "111225eb", "metadata": {}, "outputs": [], "source": [ @@ -82,14 +87,43 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. 
To get probabilities, you can run a softmax on it.\n", - "print(torch.nn.functional.softmax(output[0], dim=0))\n" + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b511e407", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "580ff984", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" ] }, { "cell_type": "markdown", + "id": "10c80f6a", "metadata": {}, "source": [ "### Model Description\n", @@ -99,16 +133,16 @@ "Conventionally, people tend to design *one efficient model* for *all hardware platforms*. But different hardware has different properties, for example, CPU has higher frequency and GPU is better at parallization. Therefore, instead of generalizing, we need to **specialize** CNN architectures for different hardware platforms. As shown in below, with similar accuracy, specialization offers free yet significant performance boost on all three platforms.\n", "\n", "| Model structure | GPU Latency | CPU Latency | Mobile Latency\n", - "| --------------- | ----------- | ----------- | ----------- | \n", + "| --------------- | ----------- | ----------- | ----------- |\n", "| proxylessnas_gpu | **5.1ms** | 204.9ms | 124ms |\n", - "| proxylessnas_cpu | 7.4ms | **138.7ms** | 116ms | \n", + "| proxylessnas_cpu | 7.4ms | **138.7ms** | 116ms |\n", "| proxylessnas_mobile | 7.2ms | 164.1ms | **78ms** |\n", "\n", "The corresponding top-1 accuracy with pretrained models are listed below.\n", "\n", "| Model structure | Top-1 error |\n", - "| --------------- | ----------- | \n", - "| proxylessnas_cpu | 24.7 | \n", + "| --------------- | ----------- |\n", + "| proxylessnas_cpu | 24.7 |\n", "| proxylessnas_gpu | 24.9 |\n", "| proxylessnas_mobile | 25.4 |\n", "| proxylessnas_mobile_14 | 23.3 |\n", @@ -121,5 +155,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_resnest.ipynb b/assets/hub/pytorch_vision_resnest.ipynb new file mode 100644 index 000000000000..ce0325d9a1ed --- /dev/null +++ b/assets/hub/pytorch_vision_resnest.ipynb @@ -0,0 +1,152 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "787a3286", + "metadata": {}, + "source": [ + "### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# ResNeSt\n", + "\n", + "*Author: Hang Zhang*\n", + "\n", + "**A new ResNet variant.**\n", + "\n", + "\"alt\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "883587e5", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "# get list of models\n", + "torch.hub.list('zhanghang1989/ResNeSt', force_reload=True)\n", + "# load pretrained models, using ResNeSt-50 as an 
example\n", + "model = torch.hub.load('zhanghang1989/ResNeSt', 'resnest50', pretrained=True)\n", + "model.eval()" + ] + }, + { + "cell_type": "markdown", + "id": "605ea118", + "metadata": {}, + "source": [ + "All pre-trained models expect input images normalized in the same way,\n", + "i.e. mini-batches of 3-channel RGB images of shape `(3 x H x W)`, where `H` and `W` are expected to be at least `224`.\n", + "The images have to be loaded in to a range of `[0, 1]` and then normalized using `mean = [0.485, 0.456, 0.406]`\n", + "and `std = [0.229, 0.224, 0.225]`.\n", + "\n", + "Here's a sample execution." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bc6101ba", + "metadata": {}, + "outputs": [], + "source": [ + "# Download an example image from the pytorch website\n", + "import urllib\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", + "try: urllib.URLopener().retrieve(url, filename)\n", + "except: urllib.request.urlretrieve(url, filename)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4339ed07", + "metadata": {}, + "outputs": [], + "source": [ + "# sample execution (requires torchvision)\n", + "from PIL import Image\n", + "from torchvision import transforms\n", + "input_image = Image.open(filename)\n", + "preprocess = transforms.Compose([\n", + " transforms.Resize(256),\n", + " transforms.CenterCrop(224),\n", + " transforms.ToTensor(),\n", + " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n", + "])\n", + "input_tensor = preprocess(input_image)\n", + "input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model\n", + "\n", + "# move the input and model to GPU for speed if available\n", + "if torch.cuda.is_available():\n", + " input_batch = input_batch.to('cuda')\n", + " model.to('cuda')\n", + "\n", + "with torch.no_grad():\n", + " output = model(input_batch)\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", + "print(output[0])\n", + "# The output has unnormalized scores. To get probabilities, you can run a softmax on it.\n", + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f434fa2c", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2d8f0dff", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" + ] + }, + { + "cell_type": "markdown", + "id": "4223d2df", + "metadata": {}, + "source": [ + "### Model Description\n", + "\n", + "ResNeSt models are from the [ResNeSt: Split-Attention Networks](https://arxiv.org/pdf/2004.08955.pdf) paper.\n", + "\n", + "While image classification models have recently continued to advance, most downstream applications such as object detection and semantic segmentation still employ ResNet variants as the backbone network due to their simple and modular structure. 
We present a simple and modular Split-Attention block that enables attention across feature-map groups. By stacking these Split-Attention blocks ResNet-style, we obtain a new ResNet variant which we call ResNeSt. Our network preserves the overall ResNet structure to be used in downstream tasks straightforwardly without introducing additional computational costs. ResNeSt models outperform other networks with similar model complexities, and also help downstream tasks including object detection, instance segmentation and semantic segmentation.\n", + "\n", + "| | crop size | PyTorch |\n", + "|-------------|-----------|---------|\n", + "| ResNeSt-50 | 224 | 81.03 |\n", + "| ResNeSt-101 | 256 | 82.83 |\n", + "| ResNeSt-200 | 320 | 83.84 |\n", + "| ResNeSt-269 | 416 | 84.54 |\n", + "\n", + "### References\n", + "\n", + " - [ResNeSt: Split-Attention Networks](https://arxiv.org/abs/2004.08955)." + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/pytorch_vision_resnet.ipynb b/assets/hub/pytorch_vision_resnet.ipynb index 3c7693bee364..7776c1056362 100644 --- a/assets/hub/pytorch_vision_resnet.ipynb +++ b/assets/hub/pytorch_vision_resnet.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "f9bbb13c", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -21,21 +22,23 @@ { "cell_type": "code", "execution_count": null, + "id": "0a40e313", "metadata": {}, "outputs": [], "source": [ "import torch\n", - "model = torch.hub.load('pytorch/vision', 'resnet18', pretrained=True)\n", + "model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet18', pretrained=True)\n", "# or any of these variants\n", - "# model = torch.hub.load('pytorch/vision', 'resnet34', pretrained=True)\n", - "# model = torch.hub.load('pytorch/vision', 'resnet50', pretrained=True)\n", - "# model = torch.hub.load('pytorch/vision', 'resnet101', pretrained=True)\n", - "# model = torch.hub.load('pytorch/vision', 'resnet152', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet34', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet50', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet101', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet152', pretrained=True)\n", "model.eval()" ] }, { "cell_type": "markdown", + "id": "a4bfb4a7", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -49,12 +52,13 @@ { "cell_type": "code", "execution_count": null, + "id": "c2b4cc0c", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -62,6 +66,7 @@ { "cell_type": "code", "execution_count": null, + "id": "e56bce1b", "metadata": {}, "outputs": [], "source": [ @@ -85,14 +90,43 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. 
To get probabilities, you can run a softmax on it.\n", - "print(torch.nn.functional.softmax(output[0], dim=0))\n" + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b9a4bbe9", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d60f1f2e", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" ] }, { "cell_type": "markdown", + "id": "4ee80aa4", "metadata": {}, "source": [ "### Model Description\n", @@ -100,7 +134,7 @@ "Resnet models were proposed in \"Deep Residual Learning for Image Recognition\".\n", "Here we have the 5 versions of resnet models, which contains 18, 34, 50, 101, 152 layers respectively.\n", "Detailed model architectures can be found in Table 1.\n", - "Their 1-crop error rates on imagenet dataset with pretrained models are listed below.\n", + "Their 1-crop error rates on ImageNet dataset with pretrained models are listed below.\n", "\n", "| Model structure | Top-1 error | Top-5 error |\n", "| --------------- | ----------- | ----------- |\n", @@ -118,5 +152,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_resnext.ipynb b/assets/hub/pytorch_vision_resnext.ipynb index b3c5f07761a5..288e663405a8 100644 --- a/assets/hub/pytorch_vision_resnext.ipynb +++ b/assets/hub/pytorch_vision_resnext.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "e95ba684", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -21,18 +22,20 @@ { "cell_type": "code", "execution_count": null, + "id": "4361f74f", "metadata": {}, "outputs": [], "source": [ "import torch\n", - "model = torch.hub.load('pytorch/vision', 'resnext50_32x4d', pretrained=True)\n", + "model = torch.hub.load('pytorch/vision:v0.10.0', 'resnext50_32x4d', pretrained=True)\n", "# or\n", - "# model = torch.hub.load('pytorch/vision', 'resnext101_32x8d', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'resnext101_32x8d', pretrained=True)\n", "model.eval()" ] }, { "cell_type": "markdown", + "id": "29e4eb83", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -46,12 +49,13 @@ { "cell_type": "code", "execution_count": null, + "id": "95e7b563", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -59,6 +63,7 @@ { "cell_type": "code", "execution_count": null, + "id": "4f7dfbc5", "metadata": {}, "outputs": [], "source": [ @@ -82,14 +87,45 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with 
confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. To get probabilities, you can run a softmax on it.\n", - "print(torch.nn.functional.softmax(output[0], dim=0))\n" + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "00a012c2", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f3406da4", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" ] }, { "cell_type": "markdown", + "id": "377290b1", "metadata": {}, "source": [ "### Model Description\n", @@ -97,7 +133,7 @@ "Resnext models were proposed in [Aggregated Residual Transformations for Deep Neural Networks](https://arxiv.org/abs/1611.05431).\n", "Here we have the 2 versions of resnet models, which contains 50, 101 layers repspectively.\n", "A comparison in model archetechure between resnet50 and resnext50 can be found in Table 1.\n", - "Their 1-crop error rates on imagenet dataset with pretrained models are listed below.\n", + "Their 1-crop error rates on ImageNet dataset with pretrained models are listed below.\n", "\n", "| Model structure | Top-1 error | Top-5 error |\n", "| ----------------- | ----------- | ----------- |\n", @@ -112,5 +148,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_shufflenet_v2.ipynb b/assets/hub/pytorch_vision_shufflenet_v2.ipynb index 0bbdedf93b55..cf0342a9e1d9 100644 --- a/assets/hub/pytorch_vision_shufflenet_v2.ipynb +++ b/assets/hub/pytorch_vision_shufflenet_v2.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "f4a19300", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -13,7 +14,7 @@ "\n", "*Author: Pytorch Team*\n", "\n", - "**An efficient ConvNet optimized for speed and memory, pre-trained on Imagenet**\n", + "**An efficient ConvNet optimized for speed and memory, pre-trained on ImageNet**\n", "\n", "_ | _\n", "- | -\n", @@ -23,16 +24,18 @@ { "cell_type": "code", "execution_count": null, + "id": "5473631b", "metadata": {}, "outputs": [], "source": [ "import torch\n", - "model = torch.hub.load('pytorch/vision', 'shufflenet_v2_x1_0', pretrained=True)\n", + "model = torch.hub.load('pytorch/vision:v0.10.0', 'shufflenet_v2_x1_0', pretrained=True)\n", "model.eval()" ] }, { "cell_type": "markdown", + "id": "d740addf", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -46,12 +49,13 @@ { "cell_type": "code", "execution_count": null, + "id": "eca945f9", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = 
(\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -59,6 +63,7 @@ { "cell_type": "code", "execution_count": null, + "id": "d74a9ac3", "metadata": {}, "outputs": [], "source": [ @@ -82,14 +87,43 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. To get probabilities, you can run a softmax on it.\n", - "print(torch.nn.functional.softmax(output[0], dim=0))\n" + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f5c09336", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25fb267b", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" ] }, { "cell_type": "markdown", + "id": "54e715bf", "metadata": {}, "source": [ "### Model Description\n", @@ -109,5 +143,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_snnmlp.ipynb b/assets/hub/pytorch_vision_snnmlp.ipynb new file mode 100644 index 000000000000..f9d98e01f998 --- /dev/null +++ b/assets/hub/pytorch_vision_snnmlp.ipynb @@ -0,0 +1,141 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "a6c89fb4", + "metadata": {}, + "source": [ + "### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# SNNMLP\n", + "\n", + "*Author: Huawei Noah's Ark Lab*\n", + "\n", + "**Brain-inspired Multilayer Perceptron with Spiking Neurons**\n", + "\n", + "\"alt\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f8525cb4", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "model = torch.hub.load('huawei-noah/Efficient-AI-Backbones', 'snnmlp_t', pretrained=True)\n", + "# or\n", + "# model = torch.hub.load('huawei-noah/Efficient-AI-Backbones', 'snnmlp_s', pretrained=True)\n", + "# or\n", + "# model = torch.hub.load('huawei-noah/Efficient-AI-Backbones', 'snnmlp_b', pretrained=True)\n", + "model.eval()" + ] + }, + { + "cell_type": "markdown", + "id": "ff75baee", + "metadata": {}, + "source": [ + "All pre-trained models expect input images normalized in the same way,\n", + "i.e. 
mini-batches of 3-channel RGB images of shape `(3 x H x W)`, where `H` and `W` are expected to be at least `224`.\n", + "The images have to be loaded in to a range of `[0, 1]` and then normalized using `mean = [0.485, 0.456, 0.406]`\n", + "and `std = [0.229, 0.224, 0.225]`.\n", + "\n", + "Here's a sample execution." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "97496f87", + "metadata": {}, + "outputs": [], + "source": [ + "# Download an example image from the pytorch website\n", + "import urllib\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "try: urllib.URLopener().retrieve(url, filename)\n", + "except: urllib.request.urlretrieve(url, filename)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f1ef1fb8", + "metadata": {}, + "outputs": [], + "source": [ + "# sample execution (requires torchvision)\n", + "from PIL import Image\n", + "from torchvision import transforms\n", + "input_image = Image.open(filename)\n", + "preprocess = transforms.Compose([\n", + " transforms.Resize(256),\n", + " transforms.CenterCrop(224),\n", + " transforms.ToTensor(),\n", + " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n", + "])\n", + "input_tensor = preprocess(input_image)\n", + "input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model\n", + "\n", + "# move the input and model to GPU for speed if available\n", + "if torch.cuda.is_available():\n", + " input_batch = input_batch.to('cuda')\n", + " model.to('cuda')\n", + "\n", + "with torch.no_grad():\n", + " output = model(input_batch)\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", + "print(output[0])\n", + "# The output has unnormalized scores. To get probabilities, you can run a softmax on it.\n", + "print(torch.nn.functional.softmax(output[0], dim=0))\n" + ] + }, + { + "cell_type": "markdown", + "id": "7d27433d", + "metadata": {}, + "source": [ + "### Model Description\n", + "\n", + "SNNMLP incorporates the mechanism of LIF neurons into the MLP models, to achieve better accuracy without extra FLOPs. We propose a full-precision LIF operation to communicate between patches, including horizontal LIF and vertical LIF in different directions. We also propose to use group LIF to extract better local features. With LIF modules, our SNNMLP model achieves 81.9%, 83.3% and 83.6% top-1 accuracy on ImageNet dataset with only 4.4G, 8.5G and 15.2G FLOPs, respectively.\n", + "\n", + "The corresponding accuracy on ImageNet dataset with pretrained model is listed below.\n", + "\n", + "| Model structure | #Parameters | FLOPs | Top-1 acc |\n", + "| --------------- | ----------- | ----------- | ----------- |\n", + "| SNNMLP Tiny | 28M | 4.4G | 81.88 |\n", + "| SNNMLP Small | 50M | 8.5G | 83.30 |\n", + "| SNNMLP Base | 88M | 15.2G | 85.59 |\n", + "\n", + "\n", + "### References\n", + "\n", + "You can read the full paper [here](https://arxiv.org/abs/2203.14679)." 
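As a rough illustration of the "full-precision LIF" operation mentioned in the description above, the toy function below scans patch features along the horizontal axis with a leaky integrate-and-fire style update. It is not the SNNMLP module from the repository: the decay and threshold values, the reset rule, and the shapes are illustrative assumptions only.

```python
import torch

def horizontal_lif(x, decay=0.25, threshold=1.0):
    """Toy LIF-style scan over the width axis of a (B, C, H, W) feature map."""
    membrane = torch.zeros_like(x[..., 0])
    outputs = []
    for w in range(x.shape[-1]):                         # visit patches left to right
        membrane = (1.0 - decay) * membrane + x[..., w]  # leaky integration of incoming features
        fired = (membrane > threshold).to(x.dtype)       # which neurons cross the threshold
        outputs.append(membrane * fired)                 # keep the full-precision value, not a binary spike
        membrane = membrane * (1.0 - fired)              # reset the neurons that fired
    return torch.stack(outputs, dim=-1)

feat = torch.randn(1, 96, 14, 14)
print(horizontal_lif(feat).shape)  # torch.Size([1, 96, 14, 14])
```

A "vertical LIF" would run the same scan over the height axis, and "group LIF" would apply it within local groups of patches, as the description above outlines.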
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5f52ea04", + "metadata": {}, + "outputs": [], + "source": [ + "@inproceedings{li2022brain,\n", + " title={Brain-inspired multilayer perceptron with spiking neurons},\n", + " author={Li, Wenshuo and Chen, Hanting and Guo, Jianyuan and Zhang, Ziyang and Wang, Yunhe},\n", + " booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},\n", + " pages={783--793},\n", + " year={2022}\n", + "}" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/pytorch_vision_squeezenet.ipynb b/assets/hub/pytorch_vision_squeezenet.ipynb index ef1215d57d7b..321459b32712 100644 --- a/assets/hub/pytorch_vision_squeezenet.ipynb +++ b/assets/hub/pytorch_vision_squeezenet.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "45534887", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -21,18 +22,20 @@ { "cell_type": "code", "execution_count": null, + "id": "31cb004d", "metadata": {}, "outputs": [], "source": [ "import torch\n", - "model = torch.hub.load('pytorch/vision', 'squeezenet1_0', pretrained=True)\n", + "model = torch.hub.load('pytorch/vision:v0.10.0', 'squeezenet1_0', pretrained=True)\n", "# or\n", - "# model = torch.hub.load('pytorch/vision', 'squeezenet1_1', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'squeezenet1_1', pretrained=True)\n", "model.eval()" ] }, { "cell_type": "markdown", + "id": "56023f86", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -46,12 +49,13 @@ { "cell_type": "code", "execution_count": null, + "id": "5a002f61", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -59,6 +63,7 @@ { "cell_type": "code", "execution_count": null, + "id": "e4081954", "metadata": {}, "outputs": [], "source": [ @@ -82,14 +87,43 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. 
To get probabilities, you can run a softmax on it.\n", - "print(torch.nn.functional.softmax(output[0], dim=0))\n" + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0199159d", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c015a43c", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" ] }, { "cell_type": "markdown", + "id": "e69f32f8", "metadata": {}, "source": [ "### Model Description\n", @@ -99,7 +133,7 @@ "Model `squeezenet1_1` is from the [official squeezenet repo](https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1).\n", "It has 2.4x less computation and slightly fewer parameters than `squeezenet1_0`, without sacrificing accuracy.\n", "\n", - "Their 1-crop error rates on imagenet dataset with pretrained models are listed below.\n", + "Their 1-crop error rates on ImageNet dataset with pretrained models are listed below.\n", "\n", "| Model structure | Top-1 error | Top-5 error |\n", "| --------------- | ----------- | ----------- |\n", @@ -114,5 +148,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_vgg.ipynb b/assets/hub/pytorch_vision_vgg.ipynb index 658af86910b9..b9f22450011a 100644 --- a/assets/hub/pytorch_vision_vgg.ipynb +++ b/assets/hub/pytorch_vision_vgg.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "2713e1bb", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -13,7 +14,7 @@ "\n", "*Author: Pytorch Team*\n", "\n", - "**Award winning ConvNets from 2014 Imagenet ILSVRC challenge**\n", + "**Award winning ConvNets from 2014 ImageNet ILSVRC challenge**\n", "\n", "\"alt\"" ] @@ -21,24 +22,26 @@ { "cell_type": "code", "execution_count": null, + "id": "e6fe1a6d", "metadata": {}, "outputs": [], "source": [ "import torch\n", - "model = torch.hub.load('pytorch/vision', 'vgg11', pretrained=True)\n", + "model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg11', pretrained=True)\n", "# or any of these variants\n", - "# model = torch.hub.load('pytorch/vision', 'vgg11_bn', pretrained=True)\n", - "# model = torch.hub.load('pytorch/vision', 'vgg13', pretrained=True)\n", - "# model = torch.hub.load('pytorch/vision', 'vgg13_bn', pretrained=True)\n", - "# model = torch.hub.load('pytorch/vision', 'vgg16', pretrained=True)\n", - "# model = torch.hub.load('pytorch/vision', 'vgg16_bn', pretrained=True)\n", - "# model = torch.hub.load('pytorch/vision', 'vgg19', pretrained=True)\n", - "# model = torch.hub.load('pytorch/vision', 'vgg19_bn', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg11_bn', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg13', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg13_bn', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg16', pretrained=True)\n", + "# model = 
torch.hub.load('pytorch/vision:v0.10.0', 'vgg16_bn', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg19', pretrained=True)\n", + "# model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg19_bn', pretrained=True)\n", "model.eval()" ] }, { "cell_type": "markdown", + "id": "9ff09011", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -52,12 +55,13 @@ { "cell_type": "code", "execution_count": null, + "id": "a0eaf3d4", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -65,6 +69,7 @@ { "cell_type": "code", "execution_count": null, + "id": "2b9df68e", "metadata": {}, "outputs": [], "source": [ @@ -88,25 +93,54 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. To get probabilities, you can run a softmax on it.\n", - "print(torch.nn.functional.softmax(output[0], dim=0))\n" + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "79a7cd22", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0267d94a", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" ] }, { "cell_type": "markdown", + "id": "28a4abca", "metadata": {}, "source": [ "### Model Description\n", "\n", "Here we have implementations for the models proposed in [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556),\n", - "for each configurations and their with bachnorm version.\n", + "for each configurations and their with batchnorm version.\n", "\n", "For example, configuration `A` presented in the paper is `vgg11`, configuration `B` is `vgg13`, configuration `D` is `vgg16`\n", "and configuration `E` is `vgg19`. 
Their batchnorm version are suffixed with `_bn`.\n", "\n", - "Their 1-crop error rates on imagenet dataset with pretrained models are listed below.\n", + "Their Top-1 error rates on ImageNet dataset with pretrained models are listed below.\n", "\n", "| Model structure | Top-1 error | Top-5 error |\n", "| --------------- | ----------- | ----------- |\n", @@ -127,5 +161,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/pytorch_vision_wide_resnet.ipynb b/assets/hub/pytorch_vision_wide_resnet.ipynb index 4b70eb9fc158..2fea7ffd6ffd 100644 --- a/assets/hub/pytorch_vision_wide_resnet.ipynb +++ b/assets/hub/pytorch_vision_wide_resnet.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "ff8d945e", "metadata": {}, "source": [ "### This notebook is optionally accelerated with a GPU runtime.\n", @@ -21,19 +22,21 @@ { "cell_type": "code", "execution_count": null, + "id": "464e5aaf", "metadata": {}, "outputs": [], "source": [ "import torch\n", "# load WRN-50-2:\n", - "model = torch.hub.load('pytorch/vision', 'wide_resnet50_2', pretrained=True)\n", + "model = torch.hub.load('pytorch/vision:v0.10.0', 'wide_resnet50_2', pretrained=True)\n", "# or WRN-101-2\n", - "model = torch.hub.load('pytorch/vision', 'wide_resnet101_2', pretrained=True)\n", + "model = torch.hub.load('pytorch/vision:v0.10.0', 'wide_resnet101_2', pretrained=True)\n", "model.eval()" ] }, { "cell_type": "markdown", + "id": "762506a5", "metadata": {}, "source": [ "All pre-trained models expect input images normalized in the same way,\n", @@ -47,12 +50,13 @@ { "cell_type": "code", "execution_count": null, + "id": "d478d54d", "metadata": {}, "outputs": [], "source": [ "# Download an example image from the pytorch website\n", "import urllib\n", - "url, filename = (\"https://github.com/pytorch/hub/raw/master/dog.jpg\", \"dog.jpg\")\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", "try: urllib.URLopener().retrieve(url, filename)\n", "except: urllib.request.urlretrieve(url, filename)" ] @@ -60,6 +64,7 @@ { "cell_type": "code", "execution_count": null, + "id": "e8c65e9c", "metadata": {}, "outputs": [], "source": [ @@ -83,14 +88,43 @@ "\n", "with torch.no_grad():\n", " output = model(input_batch)\n", - "# Tensor of shape 1000, with confidence scores over Imagenet's 1000 classes\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", "print(output[0])\n", "# The output has unnormalized scores. 
To get probabilities, you can run a softmax on it.\n", - "print(torch.nn.functional.softmax(output[0], dim=0))\n" + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "83885b0f", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0a8d4feb", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" ] }, { "cell_type": "markdown", + "id": "60c699b1", "metadata": {}, "source": [ "### Model Description\n", @@ -119,5 +153,5 @@ ], "metadata": {}, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 5 } diff --git a/assets/hub/sigsep_open-unmix-pytorch_umx.ipynb b/assets/hub/sigsep_open-unmix-pytorch_umx.ipynb new file mode 100644 index 000000000000..dbbdb683fff5 --- /dev/null +++ b/assets/hub/sigsep_open-unmix-pytorch_umx.ipynb @@ -0,0 +1,120 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "52f8d602", + "metadata": {}, + "source": [ + "### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# Open-Unmix\n", + "\n", + "*Author: Inria*\n", + "\n", + "**Reference implementation for music source separation**\n", + "\n", + "\"alt\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bcf93292", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "# assuming you have a PyTorch >=1.6.0 installed\n", + "pip install -q torchaudio" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c3acad29", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "\n", + "# loading umxhq four target separator\n", + "separator = torch.hub.load('sigsep/open-unmix-pytorch', 'umxhq')\n", + "\n", + "# generate random audio\n", + "# ... with shape (nb_samples, nb_channels, nb_timesteps)\n", + "# ... and with the same sample rate as that of the separator\n", + "audio = torch.rand((1, 2, 100000))\n", + "original_sample_rate = separator.sample_rate\n", + "\n", + "# make sure to resample the audio to models' sample rate, separator.sample_rate, if the two are different\n", + "# resampler = torchaudio.transforms.Resample(original_sample_rate, separator.sample_rate)\n", + "# audio = resampler(audio)\n", + "\n", + "estimates = separator(audio)\n", + "# estimates.shape = (1, 4, 2, 100000)" + ] + }, + { + "cell_type": "markdown", + "id": "04083463", + "metadata": {}, + "source": [ + "### Model Description\n", + "\n", + "__Open-Unmix__ provides ready-to-use models that allow users to separate pop music into four stems: __vocals__, __drums__, __bass__ and the remaining __other__ instruments. 
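As a quick aside on the separation call shown in the code cell above: real recordings rarely match the separator's sample rate, so the commented-out resampling step matters. Below is a minimal sketch, assuming `separator` is the object returned by the `torch.hub.load('sigsep/open-unmix-pytorch', 'umxhq')` call above and that `torchaudio` is installed; the file name `mixture.wav` is a placeholder.

```python
import torch
import torchaudio

# Load a stereo mixture from disk; "mixture.wav" is a placeholder file name.
audio, sr = torchaudio.load("mixture.wav")        # shape: (nb_channels, nb_timesteps)

# Resample to the separator's expected rate if the two differ.
if sr != separator.sample_rate:
    resampler = torchaudio.transforms.Resample(sr, int(separator.sample_rate))
    audio = resampler(audio)

# Add the batch dimension expected by the separator: (nb_samples, nb_channels, nb_timesteps).
audio = audio.unsqueeze(0)

with torch.no_grad():
    estimates = separator(audio)                  # (nb_samples, nb_targets, nb_channels, nb_timesteps)
```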
The models were pre-trained on the freely available [MUSDB18](https://sigsep.github.io/datasets/musdb.html) dataset.\n", + "\n", + "Each target model is based on a three-layer bidirectional deep LSTM. The model learns to predict the magnitude spectrogram of a target source, like vocals, from the magnitude spectrogram of a mixture input. Internally, the prediction is obtained by applying a mask on the input. The model is optimized in the magnitude domain using mean squared error.\n", + "\n", + "A `Separator` meta-model (as shown in the code example above) puts together multiple _Open-unmix_ spectrogram models for each desired target, and combines their output through a multichannel generalized Wiener filter, before application of inverse STFTs using `torchaudio`.\n", + "The filtering is a differentiable (but parameter-free) version of [norbert](https://github.com/sigsep/norbert).\n", + "\n", + "### Pre-trained `Separator` models\n", + "\n", + "* __`umxhq` (default)__ is trained on [MUSDB18-HQ](https://sigsep.github.io/datasets/musdb.html#uncompressed-wav), which comprises the same tracks as MUSDB18 but uncompressed, yielding a full bandwidth of 22050 Hz.\n", + "\n", + "* __`umx`__ is trained on the regular [MUSDB18](https://sigsep.github.io/datasets/musdb.html#compressed-stems), which is bandwidth limited to 16 kHz due to AAC compression. This model should be used for comparison with other (older) methods for evaluation in [SiSEC18](sisec18.unmix.app).\n", + "\n", + "Furthermore, we provide a model for speech enhancement trained by [Sony Corporation](link):\n", + "\n", + "* __`umxse`__ is a speech enhancement model trained on the 28-speaker version of the [Voicebank+DEMAND corpus](https://datashare.is.ed.ac.uk/handle/10283/1942?show=full).\n", + "\n", + "All three models are also available as spectrogram (core) models, which take magnitude spectrogram inputs and output separated spectrograms.\n", + "These models can be loaded using `umxhq_spec`, `umx_spec` and `umxse_spec`.\n", + "\n", + "### Details\n", + "\n", + "For additional examples and documentation, please visit [the github repo](https://github.com/sigsep/open-unmix-pytorch).\n", + "\n", + "Furthermore, the models and all utility functions to preprocess, read and save audio stems are available in a Python package that can be installed via" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "613fc90f", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "pip install openunmix" + ] + }, + { + "cell_type": "markdown", + "id": "6c0ebb5a", + "metadata": {}, + "source": [ + "### References\n", + "\n", + "- [Open-Unmix - A Reference Implementation for Music Source Separation](https://doi.org/10.21105/joss.01667)\n", + "- [SigSep - Open Resources for Music Separation](https://sigsep.github.io/)" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/simplenet.ipynb b/assets/hub/simplenet.ipynb new file mode 100644 index 000000000000..5b8f37dca130 --- /dev/null +++ b/assets/hub/simplenet.ipynb @@ -0,0 +1,169 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "5aee59b7", + "metadata": {}, + "source": [ + "### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", +
"\n", + "# SimpleNet\n", + "\n", + "*Author: Seyyed Hossein Hasanpour*\n", + "\n", + "**Lets Keep it simple, Using simple architectures to outperform deeper and more complex architectures**\n", + "\n", + "\"alt\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25f86a3a", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "model = torch.hub.load(\"coderx7/simplenet_pytorch:v1.0.0\", \"simplenetv1_5m_m1\", pretrained=True)\n", + "# or any of these variants\n", + "# model = torch.hub.load(\"coderx7/simplenet_pytorch:v1.0.0\", \"simplenetv1_5m_m2\", pretrained=True)\n", + "# model = torch.hub.load(\"coderx7/simplenet_pytorch:v1.0.0\", \"simplenetv1_9m_m1\", pretrained=True)\n", + "# model = torch.hub.load(\"coderx7/simplenet_pytorch:v1.0.0\", \"simplenetv1_9m_m2\", pretrained=True)\n", + "# model = torch.hub.load(\"coderx7/simplenet_pytorch:v1.0.0\", \"simplenetv1_small_m1_05\", pretrained=True)\n", + "# model = torch.hub.load(\"coderx7/simplenet_pytorch:v1.0.0\", \"simplenetv1_small_m2_05\", pretrained=True)\n", + "# model = torch.hub.load(\"coderx7/simplenet_pytorch:v1.0.0\", \"simplenetv1_small_m1_075\", pretrained=True)\n", + "# model = torch.hub.load(\"coderx7/simplenet_pytorch:v1.0.0\", \"simplenetv1_small_m2_075\", pretrained=True)\n", + "model.eval()" + ] + }, + { + "cell_type": "markdown", + "id": "46b82ae3", + "metadata": {}, + "source": [ + "All pre-trained models expect input images normalized in the same way,\n", + "i.e. mini-batches of 3-channel RGB images of shape `(3 x H x W)`, where `H` and `W` are expected to be at least `224`.\n", + "The images have to be loaded in to a range of `[0, 1]` and then normalized using `mean = [0.485, 0.456, 0.406]`\n", + "and `std = [0.229, 0.224, 0.225]`.\n", + "\n", + "Here's a sample execution." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8d5f6eee", + "metadata": {}, + "outputs": [], + "source": [ + "# Download an example image from the pytorch website\n", + "import urllib\n", + "url, filename = (\"https://github.com/pytorch/hub/raw/master/images/dog.jpg\", \"dog.jpg\")\n", + "try: urllib.URLopener().retrieve(url, filename)\n", + "except: urllib.request.urlretrieve(url, filename)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "79bc76af", + "metadata": {}, + "outputs": [], + "source": [ + "# sample execution (requires torchvision)\n", + "from PIL import Image\n", + "from torchvision import transforms\n", + "input_image = Image.open(filename)\n", + "preprocess = transforms.Compose([\n", + " transforms.Resize(256),\n", + " transforms.CenterCrop(224),\n", + " transforms.ToTensor(),\n", + " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n", + "])\n", + "input_tensor = preprocess(input_image)\n", + "input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model\n", + "\n", + "# move the input and model to GPU for speed if available\n", + "if torch.cuda.is_available():\n", + " input_batch = input_batch.to('cuda')\n", + " model.to('cuda')\n", + "\n", + "with torch.no_grad():\n", + " output = model(input_batch)\n", + "# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes\n", + "print(output[0])\n", + "# The output has unnormalized scores. 
To get probabilities, you can run a softmax on it.\n", + "probabilities = torch.nn.functional.softmax(output[0], dim=0)\n", + "print(probabilities)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "61342377", + "metadata": {}, + "outputs": [], + "source": [ + "# Download ImageNet labels\n", + "!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b17212a3", + "metadata": {}, + "outputs": [], + "source": [ + "# Read the categories\n", + "with open(\"imagenet_classes.txt\", \"r\") as f:\n", + " categories = [s.strip() for s in f.readlines()]\n", + "# Show top categories per image\n", + "top5_prob, top5_catid = torch.topk(probabilities, 5)\n", + "for i in range(top5_prob.size(0)):\n", + " print(categories[top5_catid[i]], top5_prob[i].item())" + ] + }, + { + "cell_type": "markdown", + "id": "9af232bd", + "metadata": {}, + "source": [ + "### Model Description\n", + "\n", + "SimpleNet models were proposed in \"Lets Keep it simple, Using simple architectures to outperform deeper and more complex architectures\". \n", + "Here we have the 8 versions of simplenet models, which contains 1.5m, 3.2m, 5.7m and 9.5m parameters respectively. \n", + "Detailed model architectures can be found in Table 1 and Table 2. \n", + "Their 1-crop errors on ImageNet dataset with pretrained models are listed below. \n", + "\n", + "The m2 variants \n", + "\n", + "| Model structure | Top-1 errors | Top-5 errors |\n", + "| :------------------------- | :-----------: | :-----------:|\n", + "| simplenetv1_small_m2_05 | 38.33 | 16.512 |\n", + "| simplenetv1_small_m2_075 | 31.494 | 11.85 |\n", + "| simplenetv1_5m_m2 | 27.97 | 9.676 |\n", + "| simplenetv1_9m_m2 | 25.77 | 8.252 |\n", + "\n", + "The m1 variants \n", + "\n", + "| Model structure | Top-1 errors | Top-5 errors |\n", + "| :------------------------- | :-----------: | :-----------:|\n", + "| simplenetv1_small_m1_05 | 38.878 | 17.012 |\n", + "| simplenetv1_small_m1_075 | 32.216 | 12.282 |\n", + "| simplenetv1_5m_m1 | 28.452 | 10.06 |\n", + "| simplenetv1_9m_m1 | 26.208 | 8.514 |\n", + "\n", + "### References\n", + "\n", + " - [Lets Keep it simple, Using simple architectures to outperform deeper and more complex architectures](https://arxiv.org/abs/1608.06037)" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/snakers4_silero-models_stt.ipynb b/assets/hub/snakers4_silero-models_stt.ipynb new file mode 100644 index 000000000000..dad1d225f1c6 --- /dev/null +++ b/assets/hub/snakers4_silero-models_stt.ipynb @@ -0,0 +1,106 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "6fbff570", + "metadata": {}, + "source": [ + "### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# Silero Speech-To-Text Models\n", + "\n", + "*Author: Silero AI Team*\n", + "\n", + "**A set of compact enterprise-grade pre-trained STT Models for multiple languages.**\n", + "\n", + "_ | _\n", + "- | -\n", + "![alt](https://pytorch.org/assets/images/silero_stt_model.jpg) | ![alt](https://pytorch.org/assets/images/silero_imagenet_moment.png)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fcab74cb", + "metadata": {}, 
+ "outputs": [], + "source": [ + "%%bash\n", + "# this assumes that you have a proper version of PyTorch already installed\n", + "pip install -q torchaudio omegaconf soundfile" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8ba6ed8b", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import zipfile\n", + "import torchaudio\n", + "from glob import glob\n", + "\n", + "device = torch.device('cpu') # gpu also works, but our models are fast enough for CPU\n", + "\n", + "model, decoder, utils = torch.hub.load(repo_or_dir='snakers4/silero-models',\n", + " model='silero_stt',\n", + " language='en', # also available 'de', 'es'\n", + " device=device)\n", + "(read_batch, split_into_batches,\n", + " read_audio, prepare_model_input) = utils # see function signature for details\n", + "\n", + "# download a single file, any format compatible with TorchAudio (soundfile backend)\n", + "torch.hub.download_url_to_file('https://opus-codec.org/static/examples/samples/speech_orig.wav',\n", + " dst ='speech_orig.wav', progress=True)\n", + "test_files = glob('speech_orig.wav')\n", + "batches = split_into_batches(test_files, batch_size=10)\n", + "input = prepare_model_input(read_batch(batches[0]),\n", + " device=device)\n", + "\n", + "output = model(input)\n", + "for example in output:\n", + " print(decoder(example.cpu()))" + ] + }, + { + "cell_type": "markdown", + "id": "ba91675f", + "metadata": {}, + "source": [ + "### Model Description\n", + "\n", + "Silero Speech-To-Text models provide enterprise grade STT in a compact form-factor for several commonly spoken languages. Unlike conventional ASR models our models are robust to a variety of dialects, codecs, domains, noises, lower sampling rates (for simplicity audio should be resampled to 16 kHz). The models consume a normalized audio in the form of samples (i.e. without any pre-processing except for normalization to -1 ... 1) and output frames with token probabilities. We provide a decoder utility for simplicity (we could include it into our model itself, but scripted modules had problems with storing model artifacts i.e. labels during certain export scenarios).\n", + "\n", + "We hope that our efforts with Open-STT and Silero Models will bring the ImageNet moment in speech closer.\n", + "\n", + "### Supported Languages and Formats\n", + "\n", + "As of this page update, the following languages are supported:\n", + "\n", + "- English\n", + "- German\n", + "- Spanish\n", + "\n", + "To see the always up-to-date language list, please visit our [repo](https://github.com/snakers4/silero-models) and see the `yml` [file](https://github.com/snakers4/silero-models/blob/master/models.yml) for all available checkpoints.\n", + "\n", + "### Additional Examples and Benchmarks\n", + "\n", + "For additional examples and other model formats please visit this [link](https://github.com/snakers4/silero-models). For quality and performance benchmarks please see the [wiki](https://github.com/snakers4/silero-models/wiki). 
These resources will be updated from time to time.\n", + "\n", + "### References\n", + "\n", + "- [Silero Models](https://github.com/snakers4/silero-models)\n", + "- [Alexander Veysov, \"Toward's an ImageNet Moment for Speech-to-Text\", The Gradient, 2020](https://thegradient.pub/towards-an-imagenet-moment-for-speech-to-text/)\n", + "- [Alexander Veysov, \"A Speech-To-Text Practitioner’s Criticisms of Industry and Academia\", The Gradient, 2020](https://thegradient.pub/a-speech-to-text-practitioners-criticisms-of-industry-and-academia/)" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/snakers4_silero-models_tts.ipynb b/assets/hub/snakers4_silero-models_tts.ipynb new file mode 100644 index 000000000000..8c728fa3f854 --- /dev/null +++ b/assets/hub/snakers4_silero-models_tts.ipynb @@ -0,0 +1,99 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "b12b7bd3", + "metadata": {}, + "source": [ + "### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# Silero Text-To-Speech Models\n", + "\n", + "*Author: Silero AI Team*\n", + "\n", + "**A set of compact enterprise-grade pre-trained TTS Models for multiple languages**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bfca32ed", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "# this assumes that you have a proper version of PyTorch already installed\n", + "pip install -q torchaudio omegaconf" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32161601", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "\n", + "language = 'en'\n", + "speaker = 'lj_16khz'\n", + "device = torch.device('cpu')\n", + "model, symbols, sample_rate, example_text, apply_tts = torch.hub.load(repo_or_dir='snakers4/silero-models',\n", + " model='silero_tts',\n", + " language=language,\n", + " speaker=speaker)\n", + "model = model.to(device) # gpu or cpu\n", + "audio = apply_tts(texts=[example_text],\n", + " model=model,\n", + " sample_rate=sample_rate,\n", + " symbols=symbols,\n", + " device=device)" + ] + }, + { + "cell_type": "markdown", + "id": "35584c26", + "metadata": {}, + "source": [ + "### Model Description\n", + "\n", + "Silero Text-To-Speech models provide enterprise grade TTS in a compact form-factor for several commonly spoken languages:\n", + "\n", + "- One-line usage\n", + "- Naturally sounding speech\n", + "- No GPU or training required\n", + "- Minimalism and lack of dependencies\n", + "- A library of voices in many languages\n", + "- Support for `16kHz` and `8kHz` out of the box\n", + "- High throughput on slow hardware. 
Decent performance on one CPU thread\n", + "\n", + "### Supported Languages and Formats\n", + "\n", + "As of this page update, the speakers of the following languages are supported both in 8 kHz and 16 kHz:\n", + "\n", + "- Russian (6 speakers)\n", + "- English (1 speaker)\n", + "- German (1 speaker)\n", + "- Spanish (1 speaker)\n", + "- French (1 speaker)\n", + "\n", + "To see the always up-to-date language list, please visit our [repo](https://github.com/snakers4/silero-models) and see the `yml` [file](https://github.com/snakers4/silero-models/blob/master/models.yml) for all available checkpoints.\n", + "\n", + "### Additional Examples and Benchmarks\n", + "\n", + "For additional examples and other model formats please visit this [link](https://github.com/snakers4/silero-models). For quality and performance benchmarks please see the [wiki](https://github.com/snakers4/silero-models/wiki). These resources will be updated from time to time.\n", + "\n", + "### References\n", + "\n", + "- [Silero Models](https://github.com/snakers4/silero-models)\n", + "- [High-Quality Speech-to-Text Made Accessible, Simple and Fast](https://habr.com/ru/post/549482/)" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/snakers4_silero-vad_vad.ipynb b/assets/hub/snakers4_silero-vad_vad.ipynb new file mode 100644 index 000000000000..d4a68b6455ce --- /dev/null +++ b/assets/hub/snakers4_silero-vad_vad.ipynb @@ -0,0 +1,95 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "4e1bb36a", + "metadata": {}, + "source": [ + "### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# Silero Voice Activity Detector\n", + "\n", + "*Author: Silero AI Team*\n", + "\n", + "**Pre-trained Voice Activity Detector**\n", + "\n", + "\"alt\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5b99d31c", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "# this assumes that you have a proper version of PyTorch already installed\n", + "pip install -q torchaudio" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c7c79af4", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "torch.set_num_threads(1)\n", + "\n", + "from IPython.display import Audio\n", + "from pprint import pprint\n", + "# download example\n", + "torch.hub.download_url_to_file('https://models.silero.ai/vad_models/en.wav', 'en_example.wav')\n", + "\n", + "model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',\n", + " model='silero_vad',\n", + " force_reload=True)\n", + "\n", + "(get_speech_timestamps,\n", + " _, read_audio,\n", + " *_) = utils\n", + "\n", + "sampling_rate = 16000 # also accepts 8000\n", + "wav = read_audio('en_example.wav', sampling_rate=sampling_rate)\n", + "# get speech timestamps from full audio file\n", + "speech_timestamps = get_speech_timestamps(wav, model, sampling_rate=sampling_rate)\n", + "pprint(speech_timestamps)" + ] + }, + { + "cell_type": "markdown", + "id": "424fec64", + "metadata": {}, + "source": [ + "### Model Description\n", + "\n", + "Silero VAD: pre-trained enterprise-grade Voice Activity Detector (VAD). Enterprise-grade Speech Products made refreshingly simple (see our STT models). 
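As a rough illustration of how the `speech_timestamps` returned above can be used, the sketch below keeps only the detected speech regions of the waveform. It assumes, as in the printed output of the cell above, that each timestamp is a dict with `'start'` and `'end'` sample indices into the 1-D tensor `wav`.

```python
import torch

# `wav`, `speech_timestamps` and `sampling_rate` come from the cell above.
# Each timestamp is assumed to hold sample indices: {'start': int, 'end': int}.
if speech_timestamps:
    speech_only = torch.cat([wav[ts['start']:ts['end']] for ts in speech_timestamps])
else:
    speech_only = torch.empty(0)

kept = speech_only.numel() / sampling_rate
total = wav.numel() / sampling_rate
print(f"kept {kept:.2f} s of speech out of {total:.2f} s of audio")
```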
**Each model is published separately**.\n", + "\n", + "Currently, there are hardly any high quality / modern / free / public voice activity detectors except for WebRTC Voice Activity Detector (link). WebRTC though starts to show its age and it suffers from many false positives.\n", + "\n", + "**(!!!) Important Notice (!!!)** - the models are intended to run on CPU only and were optimized for performance on 1 CPU thread. Note that the model is quantized.\n", + "\n", + "\n", + "### Additional Examples and Benchmarks\n", + "\n", + "For additional examples and other model formats please visit this [link](https://github.com/snakers4/silero-vad) and please refer to the extensive examples in the Colab format (including the streaming examples).\n", + "\n", + "### References\n", + "\n", + "VAD model architectures are based on similar STT architectures.\n", + "\n", + "- [Silero VAD](https://github.com/snakers4/silero-vad)\n", + "- [Alexander Veysov, \"Toward's an ImageNet Moment for Speech-to-Text\", The Gradient, 2020](https://thegradient.pub/towards-an-imagenet-moment-for-speech-to-text/)\n", + "- [Alexander Veysov, \"A Speech-To-Text Practitioner’s Criticisms of Industry and Academia\", The Gradient, 2020](https://thegradient.pub/a-speech-to-text-practitioners-criticisms-of-industry-and-academia/)" + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/hub/ultralytics_yolov5.ipynb b/assets/hub/ultralytics_yolov5.ipynb new file mode 100644 index 000000000000..de6b00dcbc7f --- /dev/null +++ b/assets/hub/ultralytics_yolov5.ipynb @@ -0,0 +1,167 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "8cf75b27", + "metadata": {}, + "source": [ + "### This notebook is optionally accelerated with a GPU runtime.\n", + "### If you would like to use this acceleration, please select the menu option \"Runtime\" -> \"Change runtime type\", select \"Hardware Accelerator\" -> \"GPU\" and click \"SAVE\"\n", + "\n", + "----------------------------------------------------------------------\n", + "\n", + "# YOLOv5\n", + "\n", + "*Author: Ultralytics*\n", + "\n", + "**Ultralytics YOLOv5 🚀 for object detection, instance segmentation and image classification.**\n", + "\n", + "_ | _\n", + "- | -\n", + "![alt](https://pytorch.org/assets/images/ultralytics_yolov5_img1.png) | ![alt](https://pytorch.org/assets/images/ultralytics_yolov5_img2.png)\n", + "\n", + "\n", + "## Before You Start\n", + "\n", + "Start from a **Python>=3.8** environment with **PyTorch>=1.7** installed. To install PyTorch see [https://pytorch.org/get-started/locally/](https://pytorch.org/get-started/locally/). To install YOLOv5 dependencies:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "32d11b6e", + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "pip install -U ultralytics" + ] + }, + { + "cell_type": "markdown", + "id": "f2e6d5dc", + "metadata": {}, + "source": [ + "## Model Description\n", + "\n", + "\"YOLO\n", + "\n", + "Ultralytics YOLOv5 🚀 is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLOv5 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, instance segmentation and image classification tasks.\n", + "\n", + "We hope that the resources here will help you get the most out of YOLOv5. 
Please browse the YOLOv5 [Docs](https://docs.ultralytics.com/yolov5) for details, raise an issue on [GitHub](https://github.com/ultralytics/yolov5/issues/new/choose) for support, and join our [Discord](https://discord.gg/n6cFeSPZdD) community for questions and discussions!\n", + "\n",
+ "| Model | size (pixels) | mAPval 50-95 | mAPval 50 | Speed CPU b1 (ms) | Speed V100 b1 (ms) | Speed V100 b32 (ms) | params (M) | FLOPs @640 (B) |\n",
+ "|-------------------------------------------------------------------------------------------------|-----------------------|----------------------|-------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------|\n",
+ "| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** |\n",
+ "| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 |\n",
+ "| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 |\n",
+ "| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 |\n",
+ "| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 |\n",
+ "| | | | | | | | | |\n",
+ "| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 |\n",
+ "| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 |\n",
+ "| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 |\n",
+ "| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 |\n",
+ "| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt) + [TTA] | 1280 1536 | 55.0 **55.8** | 72.7 **72.7** | 3136 - | 26.2 - | 19.4 - | 140.7 - | 209.8 - |\n",
+ "\n", + "Table Notes\n", + "\n",
+ "- All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).\n",
+ "- **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset. Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`\n",
+ "- **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included. Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`\n",
+ "- **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) includes reflection and scale augmentations. Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`\n",
+ "\n", + "
        \n", + "\n", + "## Load From PyTorch Hub\n", + "\n", + "This example loads a pretrained **YOLOv5s** model and passes an image for inference. YOLOv5 accepts **URL**, **Filename**, **PIL**, **OpenCV**, **Numpy** and **PyTorch** inputs, and returns detections in **torch**, **pandas**, and **JSON** output formats. See the [YOLOv5 PyTorch Hub Tutorial](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) for details." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6966e6c0", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "\n", + "# Model\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)\n", + "\n", + "# Images\n", + "imgs = ['https://ultralytics.com/images/zidane.jpg'] # batch of images\n", + "\n", + "# Inference\n", + "results = model(imgs)\n", + "\n", + "# Results\n", + "results.print()\n", + "results.save() # or .show()\n", + "\n", + "results.xyxy[0] # img1 predictions (tensor)\n", + "results.pandas().xyxy[0] # img1 predictions (pandas)\n", + "# xmin ymin xmax ymax confidence class name\n", + "# 0 749.50 43.50 1148.0 704.5 0.874023 0 person\n", + "# 1 433.50 433.50 517.5 714.5 0.687988 27 tie\n", + "# 2 114.75 195.75 1095.0 708.0 0.624512 0 person\n", + "# 3 986.00 304.00 1028.0 420.0 0.286865 27 tie" + ] + }, + { + "cell_type": "markdown", + "id": "77ca7d3d", + "metadata": {}, + "source": [ + "## Citation\n", + "\n", + "If you use YOLOv5 or YOLOv5u in your research, please cite the Ultralytics YOLOv5 repository as follows:\n", + "\n", + "[![DOI](https://zenodo.org/badge/264818686.svg)](https://zenodo.org/badge/latestdoi/264818686)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cc03910b", + "metadata": { + "attributes": { + "classes": [ + "bibtex" + ], + "id": "" + } + }, + "outputs": [], + "source": [ + "@software{yolov5,\n", + " title = {YOLOv5 by Ultralytics},\n", + " author = {Glenn Jocher},\n", + " year = {2020},\n", + " version = {7.0},\n", + " license = {AGPL-3.0},\n", + " url = {https://github.com/ultralytics/yolov5},\n", + " doi = {10.5281/zenodo.3908559},\n", + " orcid = {0000-0001-5950-6979}\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "6086f21d", + "metadata": {}, + "source": [ + "## Contact\n", + "\n", + "For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.gg/n6cFeSPZdD) community for questions and discussions!\n", + "\n", + " " + ] + } + ], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/assets/images/Cub200Dataset.png b/assets/images/Cub200Dataset.png new file mode 100644 index 000000000000..ead780b0d8ac Binary files /dev/null and b/assets/images/Cub200Dataset.png differ diff --git a/assets/images/MEALV2.png b/assets/images/MEALV2.png new file mode 100644 index 000000000000..b4e8b2088599 Binary files /dev/null and b/assets/images/MEALV2.png differ diff --git a/assets/images/MEALV2_method.png b/assets/images/MEALV2_method.png new file mode 100644 index 000000000000..02f7668d4a8c Binary files /dev/null and b/assets/images/MEALV2_method.png differ diff --git a/assets/images/MEALV2_results.png b/assets/images/MEALV2_results.png new file mode 100644 index 000000000000..947734e7044c Binary files /dev/null and b/assets/images/MEALV2_results.png differ diff --git a/assets/images/ResNeXtArch.png b/assets/images/ResNeXtArch.png new file mode 100644 index 000000000000..b75d41b64af5 Binary files /dev/null and 
b/assets/images/ResNeXtArch.png differ diff --git a/assets/images/SEArch.png b/assets/images/SEArch.png new file mode 100755 index 000000000000..a7fb8d047226 Binary files /dev/null and b/assets/images/SEArch.png differ diff --git a/assets/images/classification.jpg b/assets/images/classification.jpg new file mode 100644 index 000000000000..eb1e20641c3c Binary files /dev/null and b/assets/images/classification.jpg differ diff --git a/assets/images/dog.jpg b/assets/images/dog.jpg new file mode 100644 index 000000000000..12f0e0dd1162 Binary files /dev/null and b/assets/images/dog.jpg differ diff --git a/assets/images/fastpitch_model.png b/assets/images/fastpitch_model.png new file mode 100644 index 000000000000..f828877edfdd Binary files /dev/null and b/assets/images/fastpitch_model.png differ diff --git a/assets/images/ghostnet.png b/assets/images/ghostnet.png new file mode 100644 index 000000000000..b91337e2aea3 Binary files /dev/null and b/assets/images/ghostnet.png differ diff --git a/assets/images/hifigan_model.png b/assets/images/hifigan_model.png new file mode 100644 index 000000000000..9ba92bb6a5e8 Binary files /dev/null and b/assets/images/hifigan_model.png differ diff --git a/assets/images/hybridnets.jpg b/assets/images/hybridnets.jpg new file mode 100644 index 000000000000..ee053ce4f549 Binary files /dev/null and b/assets/images/hybridnets.jpg differ diff --git a/assets/images/ibnnet.png b/assets/images/ibnnet.png new file mode 100644 index 000000000000..d6c0ce6006da Binary files /dev/null and b/assets/images/ibnnet.png differ diff --git a/assets/images/intel-logo.png b/assets/images/intel-logo.png new file mode 100644 index 000000000000..2d022a97c15a Binary files /dev/null and b/assets/images/intel-logo.png differ diff --git a/assets/images/midas_samples.png b/assets/images/midas_samples.png new file mode 100644 index 000000000000..921e290edbae Binary files /dev/null and b/assets/images/midas_samples.png differ diff --git a/assets/images/nts-net.png b/assets/images/nts-net.png new file mode 100644 index 000000000000..b7bd97b1ec70 Binary files /dev/null and b/assets/images/nts-net.png differ diff --git a/assets/images/ofa_imagenet_results.png b/assets/images/ofa_imagenet_results.png new file mode 100644 index 000000000000..46ceae12c0c5 Binary files /dev/null and b/assets/images/ofa_imagenet_results.png differ diff --git a/assets/images/once_for_all_overview.png b/assets/images/once_for_all_overview.png new file mode 100644 index 000000000000..555bf30cc5e1 Binary files /dev/null and b/assets/images/once_for_all_overview.png differ diff --git a/assets/images/resnest.jpg b/assets/images/resnest.jpg new file mode 100644 index 000000000000..994dc6ff00ee Binary files /dev/null and b/assets/images/resnest.jpg differ diff --git a/assets/images/sigsep_logo_inria.png b/assets/images/sigsep_logo_inria.png new file mode 100644 index 000000000000..066ea8861253 Binary files /dev/null and b/assets/images/sigsep_logo_inria.png differ diff --git a/assets/images/sigsep_umx-diagram.png b/assets/images/sigsep_umx-diagram.png new file mode 100644 index 000000000000..9cb5c4a3591d Binary files /dev/null and b/assets/images/sigsep_umx-diagram.png differ diff --git a/assets/images/silero_imagenet_moment.png b/assets/images/silero_imagenet_moment.png new file mode 100644 index 000000000000..faa16dc5ce49 Binary files /dev/null and b/assets/images/silero_imagenet_moment.png differ diff --git a/assets/images/silero_logo.jpg b/assets/images/silero_logo.jpg new file mode 100644 index 000000000000..0ced1942afa6 
Binary files /dev/null and b/assets/images/silero_logo.jpg differ diff --git a/assets/images/silero_stt_model.jpg b/assets/images/silero_stt_model.jpg new file mode 100644 index 000000000000..2e67c11c2d31 Binary files /dev/null and b/assets/images/silero_stt_model.jpg differ diff --git a/assets/images/silero_vad_performance.png b/assets/images/silero_vad_performance.png new file mode 100644 index 000000000000..9d1d9f4f1479 Binary files /dev/null and b/assets/images/silero_vad_performance.png differ diff --git a/assets/images/simplenet.jpg b/assets/images/simplenet.jpg new file mode 100644 index 000000000000..e3bc71437dc9 Binary files /dev/null and b/assets/images/simplenet.jpg differ diff --git a/assets/images/slowfast.png b/assets/images/slowfast.png new file mode 100644 index 000000000000..c5f542a1f81e Binary files /dev/null and b/assets/images/slowfast.png differ diff --git a/assets/images/snnmlp.png b/assets/images/snnmlp.png new file mode 100644 index 000000000000..f08f8ea86f6d Binary files /dev/null and b/assets/images/snnmlp.png differ diff --git a/assets/images/ultralytics_yolov5_img0.jpg b/assets/images/ultralytics_yolov5_img0.jpg new file mode 100644 index 000000000000..b4147e36764a Binary files /dev/null and b/assets/images/ultralytics_yolov5_img0.jpg differ diff --git a/assets/images/ultralytics_yolov5_img1.png b/assets/images/ultralytics_yolov5_img1.png new file mode 100644 index 000000000000..73b996b237df Binary files /dev/null and b/assets/images/ultralytics_yolov5_img1.png differ diff --git a/assets/images/ultralytics_yolov5_img2.png b/assets/images/ultralytics_yolov5_img2.png new file mode 100644 index 000000000000..4e648fba938d Binary files /dev/null and b/assets/images/ultralytics_yolov5_img2.png differ diff --git a/assets/images/x3d.png b/assets/images/x3d.png new file mode 100644 index 000000000000..7f86e44b724f Binary files /dev/null and b/assets/images/x3d.png differ diff --git a/assets/images/yolop.png b/assets/images/yolop.png new file mode 100644 index 000000000000..1a6088452dc7 Binary files /dev/null and b/assets/images/yolop.png differ diff --git a/assets/main.css b/assets/main.css new file mode 100644 index 000000000000..aa7eb1db6cdd --- /dev/null +++ b/assets/main.css @@ -0,0 +1,6 @@ +/*! + * Bootstrap v4.3.1 (https://getbootstrap.com/) + * Copyright 2011-2019 The Bootstrap Authors + * Copyright 2011-2019 Twitter, Inc. 
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */:root{--blue: #007bff;--indigo: #6610f2;--purple: #6f42c1;--pink: #e83e8c;--red: #dc3545;--orange: #fd7e14;--yellow: #ffc107;--green: #28a745;--teal: #20c997;--cyan: #17a2b8;--white: #fff;--gray: #6c757d;--gray-dark: #343a40;--primary: #007bff;--secondary: #6c757d;--success: #28a745;--info: #17a2b8;--warning: #ffc107;--danger: #dc3545;--light: #f8f9fa;--dark: #343a40;--breakpoint-xs: 0;--breakpoint-sm: 576px;--breakpoint-md: 768px;--breakpoint-lg: 992px;--breakpoint-xl: 1200px;--font-family-sans-serif: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";--font-family-monospace: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace}*,*::before,*::after{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:transparent}article,aside,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}body{margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}[tabindex="-1"]:focus{outline:0 !important}hr{box-sizing:content-box;height:0;overflow:visible}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:.5rem}p{margin-top:0;margin-bottom:1rem}abbr[title],abbr[data-original-title]{text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted;cursor:help;border-bottom:0;-webkit-text-decoration-skip-ink:none;text-decoration-skip-ink:none}address{margin-bottom:1rem;font-style:normal;line-height:inherit}ol,ul,dl{margin-top:0;margin-bottom:1rem}ol ol,ul ul,ol ul,ul ol{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}b,strong{font-weight:bolder}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#007bff;text-decoration:none;background-color:transparent}a:hover{color:#0056b3;text-decoration:underline}a:not([href]):not([tabindex]){color:inherit;text-decoration:none}a:not([href]):not([tabindex]):hover,a:not([href]):not([tabindex]):focus{color:inherit;text-decoration:none}a:not([href]):not([tabindex]):focus{outline:0}pre,code,kbd,samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;font-size:1em}pre{margin-top:0;margin-bottom:1rem;overflow:auto}figure{margin:0 0 1rem}img{vertical-align:middle;border-style:none}svg{overflow:hidden;vertical-align:middle}table{border-collapse:collapse}caption{padding-top:.75rem;padding-bottom:.75rem;color:#6c757d;text-align:left;caption-side:bottom}th{text-align:inherit}label{display:inline-block;margin-bottom:.5rem}button{border-radius:0}button:focus{outline:1px dotted;outline:5px auto 
-webkit-focus-ring-color}input,button,select,optgroup,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,input{overflow:visible}button,select{text-transform:none}select{word-wrap:normal}button,[type="button"],[type="reset"],[type="submit"]{-webkit-appearance:button}button:not(:disabled),[type="button"]:not(:disabled),[type="reset"]:not(:disabled),[type="submit"]:not(:disabled){cursor:pointer}button::-moz-focus-inner,[type="button"]::-moz-focus-inner,[type="reset"]::-moz-focus-inner,[type="submit"]::-moz-focus-inner{padding:0;border-style:none}input[type="radio"],input[type="checkbox"]{box-sizing:border-box;padding:0}input[type="date"],input[type="time"],input[type="datetime-local"],input[type="month"]{-webkit-appearance:listbox}textarea{overflow:auto;resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;max-width:100%;padding:0;margin-bottom:.5rem;font-size:1.5rem;line-height:inherit;color:inherit;white-space:normal}progress{vertical-align:baseline}[type="number"]::-webkit-inner-spin-button,[type="number"]::-webkit-outer-spin-button{height:auto}[type="search"]{outline-offset:-2px;-webkit-appearance:none}[type="search"]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}summary{display:list-item;cursor:pointer}template{display:none}[hidden]{display:none !important}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{margin-bottom:.5rem;font-weight:500;line-height:1.2}h1,.h1{font-size:2.5rem}h2,.h2{font-size:2rem}h3,.h3{font-size:1.75rem}h4,.h4{font-size:1.5rem}h5,.h5{font-size:1.25rem}h6,.h6{font-size:1rem}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:6rem;font-weight:300;line-height:1.2}.display-2{font-size:5.5rem;font-weight:300;line-height:1.2}.display-3{font-size:4.5rem;font-weight:300;line-height:1.2}.display-4{font-size:3.5rem;font-weight:300;line-height:1.2}hr{margin-top:1rem;margin-bottom:1rem;border:0;border-top:1px solid rgba(0,0,0,0.1)}small,.small{font-size:80%;font-weight:400}mark,.mark{padding:.2em;background-color:#fcf8e3}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{font-size:90%;text-transform:uppercase}.blockquote{margin-bottom:1rem;font-size:1.25rem}.blockquote-footer{display:block;font-size:80%;color:#6c757d}.blockquote-footer::before{content:"\2014\00A0"}.img-fluid{max-width:100%;height:auto}.img-thumbnail{padding:.25rem;background-color:#fff;border:1px solid #dee2e6;border-radius:.25rem;max-width:100%;height:auto}.figure{display:inline-block}.figure-img{margin-bottom:.5rem;line-height:1}.figure-caption{font-size:90%;color:#6c757d}code{font-size:87.5%;color:#e83e8c;word-break:break-word}a>code{color:inherit}kbd{padding:.2rem .4rem;font-size:87.5%;color:#fff;background-color:#212529;border-radius:.2rem}kbd kbd{padding:0;font-size:100%;font-weight:700}pre{display:block;font-size:87.5%;color:#212529}pre code{font-size:inherit;color:inherit;word-break:normal}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width: 576px){.container{max-width:540px}}@media (min-width: 768px){.container{max-width:720px}}@media (min-width: 992px){.container{max-width:960px}}@media (min-width: 
1200px){.container{max-width:1140px}}.container-fluid{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{display:flex;flex-wrap:wrap;margin-right:-15px;margin-left:-15px}.no-gutters{margin-right:0;margin-left:0}.no-gutters>.col,.no-gutters>[class*="col-"]{padding-right:0;padding-left:0}.col-1,.col-2,.col-3,.col-4,.col-5,.col-6,.col-7,.col-8,.col-9,.col-10,.col-11,.col-12,.col,.col-auto,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm,.col-sm-auto,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12,.col-md,.col-md-auto,.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg,.col-lg-auto,.col-xl-1,.col-xl-2,.col-xl-3,.col-xl-4,.col-xl-5,.col-xl-6,.col-xl-7,.col-xl-8,.col-xl-9,.col-xl-10,.col-xl-11,.col-xl-12,.col-xl,.col-xl-auto{position:relative;width:100%;padding-right:15px;padding-left:15px}.col{flex-basis:0;flex-grow:1;max-width:100%}.col-auto{flex:0 0 auto;width:auto;max-width:100%}.col-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-3{flex:0 0 25%;max-width:25%}.col-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-6{flex:0 0 50%;max-width:50%}.col-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-9{flex:0 0 75%;max-width:75%}.col-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-12{flex:0 0 100%;max-width:100%}.order-first{order:-1}.order-last{order:13}.order-0{order:0}.order-1{order:1}.order-2{order:2}.order-3{order:3}.order-4{order:4}.order-5{order:5}.order-6{order:6}.order-7{order:7}.order-8{order:8}.order-9{order:9}.order-10{order:10}.order-11{order:11}.order-12{order:12}.offset-1{margin-left:8.3333333333%}.offset-2{margin-left:16.6666666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.3333333333%}.offset-5{margin-left:41.6666666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.3333333333%}.offset-8{margin-left:66.6666666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.3333333333%}.offset-11{margin-left:91.6666666667%}@media (min-width: 576px){.col-sm{flex-basis:0;flex-grow:1;max-width:100%}.col-sm-auto{flex:0 0 auto;width:auto;max-width:100%}.col-sm-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-sm-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-sm-3{flex:0 0 25%;max-width:25%}.col-sm-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-sm-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-sm-6{flex:0 0 50%;max-width:50%}.col-sm-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-sm-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-sm-9{flex:0 0 75%;max-width:75%}.col-sm-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-sm-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-sm-12{flex:0 0 
100%;max-width:100%}.order-sm-first{order:-1}.order-sm-last{order:13}.order-sm-0{order:0}.order-sm-1{order:1}.order-sm-2{order:2}.order-sm-3{order:3}.order-sm-4{order:4}.order-sm-5{order:5}.order-sm-6{order:6}.order-sm-7{order:7}.order-sm-8{order:8}.order-sm-9{order:9}.order-sm-10{order:10}.order-sm-11{order:11}.order-sm-12{order:12}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.3333333333%}.offset-sm-2{margin-left:16.6666666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.3333333333%}.offset-sm-5{margin-left:41.6666666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.3333333333%}.offset-sm-8{margin-left:66.6666666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.3333333333%}.offset-sm-11{margin-left:91.6666666667%}}@media (min-width: 768px){.col-md{flex-basis:0;flex-grow:1;max-width:100%}.col-md-auto{flex:0 0 auto;width:auto;max-width:100%}.col-md-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-md-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-md-3{flex:0 0 25%;max-width:25%}.col-md-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-md-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-md-6{flex:0 0 50%;max-width:50%}.col-md-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-md-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-md-9{flex:0 0 75%;max-width:75%}.col-md-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-md-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-md-12{flex:0 0 100%;max-width:100%}.order-md-first{order:-1}.order-md-last{order:13}.order-md-0{order:0}.order-md-1{order:1}.order-md-2{order:2}.order-md-3{order:3}.order-md-4{order:4}.order-md-5{order:5}.order-md-6{order:6}.order-md-7{order:7}.order-md-8{order:8}.order-md-9{order:9}.order-md-10{order:10}.order-md-11{order:11}.order-md-12{order:12}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.3333333333%}.offset-md-2{margin-left:16.6666666667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.3333333333%}.offset-md-5{margin-left:41.6666666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.3333333333%}.offset-md-8{margin-left:66.6666666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.3333333333%}.offset-md-11{margin-left:91.6666666667%}}@media (min-width: 992px){.col-lg{flex-basis:0;flex-grow:1;max-width:100%}.col-lg-auto{flex:0 0 auto;width:auto;max-width:100%}.col-lg-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-lg-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-lg-3{flex:0 0 25%;max-width:25%}.col-lg-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-lg-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-lg-6{flex:0 0 50%;max-width:50%}.col-lg-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-lg-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-lg-9{flex:0 0 75%;max-width:75%}.col-lg-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-lg-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-lg-12{flex:0 0 
100%;max-width:100%}.order-lg-first{order:-1}.order-lg-last{order:13}.order-lg-0{order:0}.order-lg-1{order:1}.order-lg-2{order:2}.order-lg-3{order:3}.order-lg-4{order:4}.order-lg-5{order:5}.order-lg-6{order:6}.order-lg-7{order:7}.order-lg-8{order:8}.order-lg-9{order:9}.order-lg-10{order:10}.order-lg-11{order:11}.order-lg-12{order:12}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.3333333333%}.offset-lg-2{margin-left:16.6666666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.3333333333%}.offset-lg-5{margin-left:41.6666666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.3333333333%}.offset-lg-8{margin-left:66.6666666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.3333333333%}.offset-lg-11{margin-left:91.6666666667%}}@media (min-width: 1200px){.col-xl{flex-basis:0;flex-grow:1;max-width:100%}.col-xl-auto{flex:0 0 auto;width:auto;max-width:100%}.col-xl-1{flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-xl-2{flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-xl-3{flex:0 0 25%;max-width:25%}.col-xl-4{flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-xl-5{flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-xl-6{flex:0 0 50%;max-width:50%}.col-xl-7{flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-xl-8{flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-xl-9{flex:0 0 75%;max-width:75%}.col-xl-10{flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-xl-11{flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-xl-12{flex:0 0 100%;max-width:100%}.order-xl-first{order:-1}.order-xl-last{order:13}.order-xl-0{order:0}.order-xl-1{order:1}.order-xl-2{order:2}.order-xl-3{order:3}.order-xl-4{order:4}.order-xl-5{order:5}.order-xl-6{order:6}.order-xl-7{order:7}.order-xl-8{order:8}.order-xl-9{order:9}.order-xl-10{order:10}.order-xl-11{order:11}.order-xl-12{order:12}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.3333333333%}.offset-xl-2{margin-left:16.6666666667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.3333333333%}.offset-xl-5{margin-left:41.6666666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.3333333333%}.offset-xl-8{margin-left:66.6666666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.3333333333%}.offset-xl-11{margin-left:91.6666666667%}}.table{width:100%;margin-bottom:1rem;color:#212529}.table th,.table td{padding:.75rem;vertical-align:top;border-top:1px solid #dee2e6}.table thead th{vertical-align:bottom;border-bottom:2px solid #dee2e6}.table tbody+tbody{border-top:2px solid #dee2e6}.table-sm th,.table-sm td{padding:.3rem}.table-bordered{border:1px solid #dee2e6}.table-bordered th,.table-bordered td{border:1px solid #dee2e6}.table-bordered thead th,.table-bordered thead td{border-bottom-width:2px}.table-borderless th,.table-borderless td,.table-borderless thead th,.table-borderless tbody+tbody{border:0}.table-striped tbody tr:nth-of-type(odd){background-color:rgba(0,0,0,0.05)}.table-hover tbody tr:hover{color:#212529;background-color:rgba(0,0,0,0.075)}.table-primary,.table-primary>th,.table-primary>td{background-color:#b8daff}.table-primary th,.table-primary td,.table-primary thead th,.table-primary tbody+tbody{border-color:#7abaff}.table-hover .table-primary:hover{background-color:#9fcdff}.table-hover .table-primary:hover>td,.table-hover .table-primary:hover>th{background-color:#9fcdff}.table-secondary,.table-secondary>th,.table-secondary>td{background-color:#d6d8db}.table-secondary th,.table-secondary td,.table-secondary thead th,.table-secondary 
tbody+tbody{border-color:#b3b7bb}.table-hover .table-secondary:hover{background-color:#c8cbcf}.table-hover .table-secondary:hover>td,.table-hover .table-secondary:hover>th{background-color:#c8cbcf}.table-success,.table-success>th,.table-success>td{background-color:#c3e6cb}.table-success th,.table-success td,.table-success thead th,.table-success tbody+tbody{border-color:#8fd19e}.table-hover .table-success:hover{background-color:#b1dfbb}.table-hover .table-success:hover>td,.table-hover .table-success:hover>th{background-color:#b1dfbb}.table-info,.table-info>th,.table-info>td{background-color:#bee5eb}.table-info th,.table-info td,.table-info thead th,.table-info tbody+tbody{border-color:#86cfda}.table-hover .table-info:hover{background-color:#abdde5}.table-hover .table-info:hover>td,.table-hover .table-info:hover>th{background-color:#abdde5}.table-warning,.table-warning>th,.table-warning>td{background-color:#ffeeba}.table-warning th,.table-warning td,.table-warning thead th,.table-warning tbody+tbody{border-color:#ffdf7e}.table-hover .table-warning:hover{background-color:#ffe8a1}.table-hover .table-warning:hover>td,.table-hover .table-warning:hover>th{background-color:#ffe8a1}.table-danger,.table-danger>th,.table-danger>td{background-color:#f5c6cb}.table-danger th,.table-danger td,.table-danger thead th,.table-danger tbody+tbody{border-color:#ed969e}.table-hover .table-danger:hover{background-color:#f1b0b7}.table-hover .table-danger:hover>td,.table-hover .table-danger:hover>th{background-color:#f1b0b7}.table-light,.table-light>th,.table-light>td{background-color:#fdfdfe}.table-light th,.table-light td,.table-light thead th,.table-light tbody+tbody{border-color:#fbfcfc}.table-hover .table-light:hover{background-color:#ececf6}.table-hover .table-light:hover>td,.table-hover .table-light:hover>th{background-color:#ececf6}.table-dark,.table-dark>th,.table-dark>td{background-color:#c6c8ca}.table-dark th,.table-dark td,.table-dark thead th,.table-dark tbody+tbody{border-color:#95999c}.table-hover .table-dark:hover{background-color:#b9bbbe}.table-hover .table-dark:hover>td,.table-hover .table-dark:hover>th{background-color:#b9bbbe}.table-active,.table-active>th,.table-active>td{background-color:rgba(0,0,0,0.075)}.table-hover .table-active:hover{background-color:rgba(0,0,0,0.075)}.table-hover .table-active:hover>td,.table-hover .table-active:hover>th{background-color:rgba(0,0,0,0.075)}.table .thead-dark th{color:#fff;background-color:#343a40;border-color:#454d55}.table .thead-light th{color:#495057;background-color:#e9ecef;border-color:#dee2e6}.table-dark{color:#fff;background-color:#343a40}.table-dark th,.table-dark td,.table-dark thead th{border-color:#454d55}.table-dark.table-bordered{border:0}.table-dark.table-striped tbody tr:nth-of-type(odd){background-color:rgba(255,255,255,0.05)}.table-dark.table-hover tbody tr:hover{color:#fff;background-color:rgba(255,255,255,0.075)}@media (max-width: 575.98px){.table-responsive-sm{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-sm>.table-bordered{border:0}}@media (max-width: 767.98px){.table-responsive-md{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-md>.table-bordered{border:0}}@media (max-width: 991.98px){.table-responsive-lg{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-lg>.table-bordered{border:0}}@media (max-width: 
1199.98px){.table-responsive-xl{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-xl>.table-bordered{border:0}}.table-responsive{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive>.table-bordered{border:0}.form-control{display:block;width:100%;height:calc(1.5em + .75rem + 2px);padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#495057;background-color:#fff;background-clip:padding-box;border:1px solid #ced4da;border-radius:.25rem;transition:border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out}@media (prefers-reduced-motion: reduce){.form-control{transition:none}}.form-control::-ms-expand{background-color:transparent;border:0}.form-control:focus{color:#495057;background-color:#fff;border-color:#80bdff;outline:0;box-shadow:0 0 0 .2rem rgba(0,123,255,0.25)}.form-control::-moz-placeholder{color:#6c757d;opacity:1}.form-control:-ms-input-placeholder{color:#6c757d;opacity:1}.form-control::-ms-input-placeholder{color:#6c757d;opacity:1}.form-control::placeholder{color:#6c757d;opacity:1}.form-control:disabled,.form-control[readonly]{background-color:#e9ecef;opacity:1}select.form-control:focus::-ms-value{color:#495057;background-color:#fff}.form-control-file,.form-control-range{display:block;width:100%}.col-form-label{padding-top:calc(.375rem + 1px);padding-bottom:calc(.375rem + 1px);margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(.5rem + 1px);padding-bottom:calc(.5rem + 1px);font-size:1.25rem;line-height:1.5}.col-form-label-sm{padding-top:calc(.25rem + 1px);padding-bottom:calc(.25rem + 1px);font-size:.875rem;line-height:1.5}.form-control-plaintext{display:block;width:100%;padding-top:.375rem;padding-bottom:.375rem;margin-bottom:0;line-height:1.5;color:#212529;background-color:transparent;border:solid transparent;border-width:1px 0}.form-control-plaintext.form-control-sm,.form-control-plaintext.form-control-lg{padding-right:0;padding-left:0}.form-control-sm{height:calc(1.5em + .5rem + 2px);padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:.2rem}.form-control-lg{height:calc(1.5em + 1rem + 2px);padding:.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:.3rem}select.form-control[size],select.form-control[multiple]{height:auto}textarea.form-control{height:auto}.form-group{margin-bottom:1rem}.form-text{display:block;margin-top:.25rem}.form-row{display:flex;flex-wrap:wrap;margin-right:-5px;margin-left:-5px}.form-row>.col,.form-row>[class*="col-"]{padding-right:5px;padding-left:5px}.form-check{position:relative;display:block;padding-left:1.25rem}.form-check-input{position:absolute;margin-top:.3rem;margin-left:-1.25rem}.form-check-input:disabled ~ .form-check-label{color:#6c757d}.form-check-label{margin-bottom:0}.form-check-inline{display:inline-flex;align-items:center;padding-left:0;margin-right:.75rem}.form-check-inline .form-check-input{position:static;margin-top:0;margin-right:.3125rem;margin-left:0}.valid-feedback{display:none;width:100%;margin-top:.25rem;font-size:80%;color:#28a745}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;line-height:1.5;color:#fff;background-color:rgba(40,167,69,0.9);border-radius:.25rem}.was-validated .form-control:valid,.form-control.is-valid{border-color:#28a745;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath 
fill='%2328a745' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e");background-repeat:no-repeat;background-position:center right calc(.375em + .1875rem);background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.was-validated .form-control:valid:focus,.form-control.is-valid:focus{border-color:#28a745;box-shadow:0 0 0 .2rem rgba(40,167,69,0.25)}.was-validated .form-control:valid ~ .valid-feedback,.was-validated .form-control:valid ~ .valid-tooltip,.form-control.is-valid ~ .valid-feedback,.form-control.is-valid ~ .valid-tooltip{display:block}.was-validated textarea.form-control:valid,textarea.form-control.is-valid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.was-validated .custom-select:valid,.custom-select.is-valid{border-color:#28a745;padding-right:calc((1em + .75rem) * 3 / 4 + 1.75rem);background:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3e%3cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3e%3c/svg%3e") no-repeat right .75rem center/8px 10px,url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%2328a745' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3e%3c/svg%3e") #fff no-repeat center right 1.75rem/calc(.75em + .375rem) calc(.75em + .375rem)}.was-validated .custom-select:valid:focus,.custom-select.is-valid:focus{border-color:#28a745;box-shadow:0 0 0 .2rem rgba(40,167,69,0.25)}.was-validated .custom-select:valid ~ .valid-feedback,.was-validated .custom-select:valid ~ .valid-tooltip,.custom-select.is-valid ~ .valid-feedback,.custom-select.is-valid ~ .valid-tooltip{display:block}.was-validated .form-control-file:valid ~ .valid-feedback,.was-validated .form-control-file:valid ~ .valid-tooltip,.form-control-file.is-valid ~ .valid-feedback,.form-control-file.is-valid ~ .valid-tooltip{display:block}.was-validated .form-check-input:valid ~ .form-check-label,.form-check-input.is-valid ~ .form-check-label{color:#28a745}.was-validated .form-check-input:valid ~ .valid-feedback,.was-validated .form-check-input:valid ~ .valid-tooltip,.form-check-input.is-valid ~ .valid-feedback,.form-check-input.is-valid ~ .valid-tooltip{display:block}.was-validated .custom-control-input:valid ~ .custom-control-label,.custom-control-input.is-valid ~ .custom-control-label{color:#28a745}.was-validated .custom-control-input:valid ~ .custom-control-label::before,.custom-control-input.is-valid ~ .custom-control-label::before{border-color:#28a745}.was-validated .custom-control-input:valid ~ .valid-feedback,.was-validated .custom-control-input:valid ~ .valid-tooltip,.custom-control-input.is-valid ~ .valid-feedback,.custom-control-input.is-valid ~ .valid-tooltip{display:block}.was-validated .custom-control-input:valid:checked ~ .custom-control-label::before,.custom-control-input.is-valid:checked ~ .custom-control-label::before{border-color:#34ce57;background-color:#34ce57}.was-validated .custom-control-input:valid:focus ~ .custom-control-label::before,.custom-control-input.is-valid:focus ~ .custom-control-label::before{box-shadow:0 0 0 .2rem rgba(40,167,69,0.25)}.was-validated .custom-control-input:valid:focus:not(:checked) ~ .custom-control-label::before,.custom-control-input.is-valid:focus:not(:checked) ~ .custom-control-label::before{border-color:#28a745}.was-validated .custom-file-input:valid ~ 
.custom-file-label,.custom-file-input.is-valid ~ .custom-file-label{border-color:#28a745}.was-validated .custom-file-input:valid ~ .valid-feedback,.was-validated .custom-file-input:valid ~ .valid-tooltip,.custom-file-input.is-valid ~ .valid-feedback,.custom-file-input.is-valid ~ .valid-tooltip{display:block}.was-validated .custom-file-input:valid:focus ~ .custom-file-label,.custom-file-input.is-valid:focus ~ .custom-file-label{border-color:#28a745;box-shadow:0 0 0 .2rem rgba(40,167,69,0.25)}.invalid-feedback{display:none;width:100%;margin-top:.25rem;font-size:80%;color:#dc3545}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;line-height:1.5;color:#fff;background-color:rgba(220,53,69,0.9);border-radius:.25rem}.was-validated .form-control:invalid,.form-control.is-invalid{border-color:#dc3545;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='%23dc3545' viewBox='-2 -2 7 7'%3e%3cpath stroke='%23dc3545' d='M0 0l3 3m0-3L0 3'/%3e%3ccircle r='.5'/%3e%3ccircle cx='3' r='.5'/%3e%3ccircle cy='3' r='.5'/%3e%3ccircle cx='3' cy='3' r='.5'/%3e%3c/svg%3E");background-repeat:no-repeat;background-position:center right calc(.375em + .1875rem);background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.was-validated .form-control:invalid:focus,.form-control.is-invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .2rem rgba(220,53,69,0.25)}.was-validated .form-control:invalid ~ .invalid-feedback,.was-validated .form-control:invalid ~ .invalid-tooltip,.form-control.is-invalid ~ .invalid-feedback,.form-control.is-invalid ~ .invalid-tooltip{display:block}.was-validated textarea.form-control:invalid,textarea.form-control.is-invalid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.was-validated .custom-select:invalid,.custom-select.is-invalid{border-color:#dc3545;padding-right:calc((1em + .75rem) * 3 / 4 + 1.75rem);background:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3e%3cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3e%3c/svg%3e") no-repeat right .75rem center/8px 10px,url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='%23dc3545' viewBox='-2 -2 7 7'%3e%3cpath stroke='%23dc3545' d='M0 0l3 3m0-3L0 3'/%3e%3ccircle r='.5'/%3e%3ccircle cx='3' r='.5'/%3e%3ccircle cy='3' r='.5'/%3e%3ccircle cx='3' cy='3' r='.5'/%3e%3c/svg%3E") #fff no-repeat center right 1.75rem/calc(.75em + .375rem) calc(.75em + .375rem)}.was-validated .custom-select:invalid:focus,.custom-select.is-invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .2rem rgba(220,53,69,0.25)}.was-validated .custom-select:invalid ~ .invalid-feedback,.was-validated .custom-select:invalid ~ .invalid-tooltip,.custom-select.is-invalid ~ .invalid-feedback,.custom-select.is-invalid ~ .invalid-tooltip{display:block}.was-validated .form-control-file:invalid ~ .invalid-feedback,.was-validated .form-control-file:invalid ~ .invalid-tooltip,.form-control-file.is-invalid ~ .invalid-feedback,.form-control-file.is-invalid ~ .invalid-tooltip{display:block}.was-validated .form-check-input:invalid ~ .form-check-label,.form-check-input.is-invalid ~ .form-check-label{color:#dc3545}.was-validated .form-check-input:invalid ~ .invalid-feedback,.was-validated .form-check-input:invalid ~ .invalid-tooltip,.form-check-input.is-invalid ~ .invalid-feedback,.form-check-input.is-invalid ~ 
.invalid-tooltip{display:block}.was-validated .custom-control-input:invalid ~ .custom-control-label,.custom-control-input.is-invalid ~ .custom-control-label{color:#dc3545}.was-validated .custom-control-input:invalid ~ .custom-control-label::before,.custom-control-input.is-invalid ~ .custom-control-label::before{border-color:#dc3545}.was-validated .custom-control-input:invalid ~ .invalid-feedback,.was-validated .custom-control-input:invalid ~ .invalid-tooltip,.custom-control-input.is-invalid ~ .invalid-feedback,.custom-control-input.is-invalid ~ .invalid-tooltip{display:block}.was-validated .custom-control-input:invalid:checked ~ .custom-control-label::before,.custom-control-input.is-invalid:checked ~ .custom-control-label::before{border-color:#e4606d;background-color:#e4606d}.was-validated .custom-control-input:invalid:focus ~ .custom-control-label::before,.custom-control-input.is-invalid:focus ~ .custom-control-label::before{box-shadow:0 0 0 .2rem rgba(220,53,69,0.25)}.was-validated .custom-control-input:invalid:focus:not(:checked) ~ .custom-control-label::before,.custom-control-input.is-invalid:focus:not(:checked) ~ .custom-control-label::before{border-color:#dc3545}.was-validated .custom-file-input:invalid ~ .custom-file-label,.custom-file-input.is-invalid ~ .custom-file-label{border-color:#dc3545}.was-validated .custom-file-input:invalid ~ .invalid-feedback,.was-validated .custom-file-input:invalid ~ .invalid-tooltip,.custom-file-input.is-invalid ~ .invalid-feedback,.custom-file-input.is-invalid ~ .invalid-tooltip{display:block}.was-validated .custom-file-input:invalid:focus ~ .custom-file-label,.custom-file-input.is-invalid:focus ~ .custom-file-label{border-color:#dc3545;box-shadow:0 0 0 .2rem rgba(220,53,69,0.25)}.form-inline{display:flex;flex-flow:row wrap;align-items:center}.form-inline .form-check{width:100%}@media (min-width: 576px){.form-inline label{display:flex;align-items:center;justify-content:center;margin-bottom:0}.form-inline .form-group{display:flex;flex:0 0 auto;flex-flow:row wrap;align-items:center;margin-bottom:0}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-plaintext{display:inline-block}.form-inline .input-group,.form-inline .custom-select{width:auto}.form-inline .form-check{display:flex;align-items:center;justify-content:center;width:auto;padding-left:0}.form-inline .form-check-input{position:relative;flex-shrink:0;margin-top:0;margin-right:.25rem;margin-left:0}.form-inline .custom-control{align-items:center;justify-content:center}.form-inline .custom-control-label{margin-bottom:0}}.btn{display:inline-block;font-weight:400;color:#212529;text-align:center;vertical-align:middle;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-color:transparent;border:1px solid transparent;padding:.375rem .75rem;font-size:1rem;line-height:1.5;border-radius:.25rem;transition:color 0.15s ease-in-out,background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out}@media (prefers-reduced-motion: reduce){.btn{transition:none}}.btn:hover{color:#212529;text-decoration:none}.btn:focus,.btn.focus{outline:0;box-shadow:0 0 0 .2rem rgba(0,123,255,0.25)}.btn.disabled,.btn:disabled{opacity:.65}a.btn.disabled,fieldset:disabled a.btn{pointer-events:none}.btn-primary{color:#fff;background-color:#007bff;border-color:#007bff}.btn-primary:hover{color:#fff;background-color:#0069d9;border-color:#0062cc}.btn-primary:focus,.btn-primary.focus{box-shadow:0 0 0 .2rem 
rgba(38,143,255,0.5)}.btn-primary.disabled,.btn-primary:disabled{color:#fff;background-color:#007bff;border-color:#007bff}.btn-primary:not(:disabled):not(.disabled):active,.btn-primary:not(:disabled):not(.disabled).active,.show>.btn-primary.dropdown-toggle{color:#fff;background-color:#0062cc;border-color:#005cbf}.btn-primary:not(:disabled):not(.disabled):active:focus,.btn-primary:not(:disabled):not(.disabled).active:focus,.show>.btn-primary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(38,143,255,0.5)}.btn-secondary{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:hover{color:#fff;background-color:#5a6268;border-color:#545b62}.btn-secondary:focus,.btn-secondary.focus{box-shadow:0 0 0 .2rem rgba(130,138,145,0.5)}.btn-secondary.disabled,.btn-secondary:disabled{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:not(:disabled):not(.disabled):active,.btn-secondary:not(:disabled):not(.disabled).active,.show>.btn-secondary.dropdown-toggle{color:#fff;background-color:#545b62;border-color:#4e555b}.btn-secondary:not(:disabled):not(.disabled):active:focus,.btn-secondary:not(:disabled):not(.disabled).active:focus,.show>.btn-secondary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(130,138,145,0.5)}.btn-success{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success:hover{color:#fff;background-color:#218838;border-color:#1e7e34}.btn-success:focus,.btn-success.focus{box-shadow:0 0 0 .2rem rgba(72,180,97,0.5)}.btn-success.disabled,.btn-success:disabled{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success:not(:disabled):not(.disabled):active,.btn-success:not(:disabled):not(.disabled).active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#1e7e34;border-color:#1c7430}.btn-success:not(:disabled):not(.disabled):active:focus,.btn-success:not(:disabled):not(.disabled).active:focus,.show>.btn-success.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(72,180,97,0.5)}.btn-info{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:hover{color:#fff;background-color:#138496;border-color:#117a8b}.btn-info:focus,.btn-info.focus{box-shadow:0 0 0 .2rem rgba(58,176,195,0.5)}.btn-info.disabled,.btn-info:disabled{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:not(:disabled):not(.disabled):active,.btn-info:not(:disabled):not(.disabled).active,.show>.btn-info.dropdown-toggle{color:#fff;background-color:#117a8b;border-color:#10707f}.btn-info:not(:disabled):not(.disabled):active:focus,.btn-info:not(:disabled):not(.disabled).active:focus,.show>.btn-info.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(58,176,195,0.5)}.btn-warning{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-warning:hover{color:#212529;background-color:#e0a800;border-color:#d39e00}.btn-warning:focus,.btn-warning.focus{box-shadow:0 0 0 .2rem rgba(222,170,12,0.5)}.btn-warning.disabled,.btn-warning:disabled{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-warning:not(:disabled):not(.disabled):active,.btn-warning:not(:disabled):not(.disabled).active,.show>.btn-warning.dropdown-toggle{color:#212529;background-color:#d39e00;border-color:#c69500}.btn-warning:not(:disabled):not(.disabled):active:focus,.btn-warning:not(:disabled):not(.disabled).active:focus,.show>.btn-warning.dropdown-toggle:focus{box-shadow:0 0 0 .2rem 
rgba(222,170,12,0.5)}.btn-danger{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:hover{color:#fff;background-color:#c82333;border-color:#bd2130}.btn-danger:focus,.btn-danger.focus{box-shadow:0 0 0 .2rem rgba(225,83,97,0.5)}.btn-danger.disabled,.btn-danger:disabled{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:not(:disabled):not(.disabled):active,.btn-danger:not(:disabled):not(.disabled).active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#bd2130;border-color:#b21f2d}.btn-danger:not(:disabled):not(.disabled):active:focus,.btn-danger:not(:disabled):not(.disabled).active:focus,.show>.btn-danger.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(225,83,97,0.5)}.btn-light{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:hover{color:#212529;background-color:#e2e6ea;border-color:#dae0e5}.btn-light:focus,.btn-light.focus{box-shadow:0 0 0 .2rem rgba(216,217,219,0.5)}.btn-light.disabled,.btn-light:disabled{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:not(:disabled):not(.disabled):active,.btn-light:not(:disabled):not(.disabled).active,.show>.btn-light.dropdown-toggle{color:#212529;background-color:#dae0e5;border-color:#d3d9df}.btn-light:not(:disabled):not(.disabled):active:focus,.btn-light:not(:disabled):not(.disabled).active:focus,.show>.btn-light.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(216,217,219,0.5)}.btn-dark{color:#fff;background-color:#343a40;border-color:#343a40}.btn-dark:hover{color:#fff;background-color:#23272b;border-color:#1d2124}.btn-dark:focus,.btn-dark.focus{box-shadow:0 0 0 .2rem rgba(82,88,93,0.5)}.btn-dark.disabled,.btn-dark:disabled{color:#fff;background-color:#343a40;border-color:#343a40}.btn-dark:not(:disabled):not(.disabled):active,.btn-dark:not(:disabled):not(.disabled).active,.show>.btn-dark.dropdown-toggle{color:#fff;background-color:#1d2124;border-color:#171a1d}.btn-dark:not(:disabled):not(.disabled):active:focus,.btn-dark:not(:disabled):not(.disabled).active:focus,.show>.btn-dark.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(82,88,93,0.5)}.btn-outline-primary{color:#007bff;border-color:#007bff}.btn-outline-primary:hover{color:#fff;background-color:#007bff;border-color:#007bff}.btn-outline-primary:focus,.btn-outline-primary.focus{box-shadow:0 0 0 .2rem rgba(0,123,255,0.5)}.btn-outline-primary.disabled,.btn-outline-primary:disabled{color:#007bff;background-color:transparent}.btn-outline-primary:not(:disabled):not(.disabled):active,.btn-outline-primary:not(:disabled):not(.disabled).active,.show>.btn-outline-primary.dropdown-toggle{color:#fff;background-color:#007bff;border-color:#007bff}.btn-outline-primary:not(:disabled):not(.disabled):active:focus,.btn-outline-primary:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-primary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(0,123,255,0.5)}.btn-outline-secondary{color:#6c757d;border-color:#6c757d}.btn-outline-secondary:hover{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-outline-secondary:focus,.btn-outline-secondary.focus{box-shadow:0 0 0 .2rem 
rgba(108,117,125,0.5)}.btn-outline-secondary.disabled,.btn-outline-secondary:disabled{color:#6c757d;background-color:transparent}.btn-outline-secondary:not(:disabled):not(.disabled):active,.btn-outline-secondary:not(:disabled):not(.disabled).active,.show>.btn-outline-secondary.dropdown-toggle{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-outline-secondary:not(:disabled):not(.disabled):active:focus,.btn-outline-secondary:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-secondary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(108,117,125,0.5)}.btn-outline-success{color:#28a745;border-color:#28a745}.btn-outline-success:hover{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success:focus,.btn-outline-success.focus{box-shadow:0 0 0 .2rem rgba(40,167,69,0.5)}.btn-outline-success.disabled,.btn-outline-success:disabled{color:#28a745;background-color:transparent}.btn-outline-success:not(:disabled):not(.disabled):active,.btn-outline-success:not(:disabled):not(.disabled).active,.show>.btn-outline-success.dropdown-toggle{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success:not(:disabled):not(.disabled):active:focus,.btn-outline-success:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-success.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(40,167,69,0.5)}.btn-outline-info{color:#17a2b8;border-color:#17a2b8}.btn-outline-info:hover{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info:focus,.btn-outline-info.focus{box-shadow:0 0 0 .2rem rgba(23,162,184,0.5)}.btn-outline-info.disabled,.btn-outline-info:disabled{color:#17a2b8;background-color:transparent}.btn-outline-info:not(:disabled):not(.disabled):active,.btn-outline-info:not(:disabled):not(.disabled).active,.show>.btn-outline-info.dropdown-toggle{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info:not(:disabled):not(.disabled):active:focus,.btn-outline-info:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-info.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(23,162,184,0.5)}.btn-outline-warning{color:#ffc107;border-color:#ffc107}.btn-outline-warning:hover{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning:focus,.btn-outline-warning.focus{box-shadow:0 0 0 .2rem rgba(255,193,7,0.5)}.btn-outline-warning.disabled,.btn-outline-warning:disabled{color:#ffc107;background-color:transparent}.btn-outline-warning:not(:disabled):not(.disabled):active,.btn-outline-warning:not(:disabled):not(.disabled).active,.show>.btn-outline-warning.dropdown-toggle{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning:not(:disabled):not(.disabled):active:focus,.btn-outline-warning:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-warning.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(255,193,7,0.5)}.btn-outline-danger{color:#dc3545;border-color:#dc3545}.btn-outline-danger:hover{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger:focus,.btn-outline-danger.focus{box-shadow:0 0 0 .2rem 
rgba(220,53,69,0.5)}.btn-outline-danger.disabled,.btn-outline-danger:disabled{color:#dc3545;background-color:transparent}.btn-outline-danger:not(:disabled):not(.disabled):active,.btn-outline-danger:not(:disabled):not(.disabled).active,.show>.btn-outline-danger.dropdown-toggle{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger:not(:disabled):not(.disabled):active:focus,.btn-outline-danger:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-danger.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(220,53,69,0.5)}.btn-outline-light{color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:hover{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:focus,.btn-outline-light.focus{box-shadow:0 0 0 .2rem rgba(248,249,250,0.5)}.btn-outline-light.disabled,.btn-outline-light:disabled{color:#f8f9fa;background-color:transparent}.btn-outline-light:not(:disabled):not(.disabled):active,.btn-outline-light:not(:disabled):not(.disabled).active,.show>.btn-outline-light.dropdown-toggle{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:not(:disabled):not(.disabled):active:focus,.btn-outline-light:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-light.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(248,249,250,0.5)}.btn-outline-dark{color:#343a40;border-color:#343a40}.btn-outline-dark:hover{color:#fff;background-color:#343a40;border-color:#343a40}.btn-outline-dark:focus,.btn-outline-dark.focus{box-shadow:0 0 0 .2rem rgba(52,58,64,0.5)}.btn-outline-dark.disabled,.btn-outline-dark:disabled{color:#343a40;background-color:transparent}.btn-outline-dark:not(:disabled):not(.disabled):active,.btn-outline-dark:not(:disabled):not(.disabled).active,.show>.btn-outline-dark.dropdown-toggle{color:#fff;background-color:#343a40;border-color:#343a40}.btn-outline-dark:not(:disabled):not(.disabled):active:focus,.btn-outline-dark:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-dark.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(52,58,64,0.5)}.btn-link{font-weight:400;color:#007bff;text-decoration:none}.btn-link:hover{color:#0056b3;text-decoration:underline}.btn-link:focus,.btn-link.focus{text-decoration:underline;box-shadow:none}.btn-link:disabled,.btn-link.disabled{color:#6c757d;pointer-events:none}.btn-lg,.btn-group-lg>.btn{padding:.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:.3rem}.btn-sm,.btn-group-sm>.btn{padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:.2rem}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:.5rem}input[type="submit"].btn-block,input[type="reset"].btn-block,input[type="button"].btn-block{width:100%}.fade{transition:opacity 0.15s linear}@media (prefers-reduced-motion: reduce){.fade{transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{position:relative;height:0;overflow:hidden;transition:height 0.35s ease}@media (prefers-reduced-motion: reduce){.collapsing{transition:none}}.dropup,.dropright,.dropdown,.dropleft{position:relative}.dropdown-toggle{white-space:nowrap}.dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid;border-right:.3em solid transparent;border-bottom:0;border-left:.3em solid transparent}.dropdown-toggle:empty::after{margin-left:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:10rem;padding:.5rem 0;margin:.125rem 0 
0;font-size:1rem;color:#212529;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.15);border-radius:.25rem}.dropdown-menu-left{right:auto;left:0}.dropdown-menu-right{right:0;left:auto}@media (min-width: 576px){.dropdown-menu-sm-left{right:auto;left:0}.dropdown-menu-sm-right{right:0;left:auto}}@media (min-width: 768px){.dropdown-menu-md-left{right:auto;left:0}.dropdown-menu-md-right{right:0;left:auto}}@media (min-width: 992px){.dropdown-menu-lg-left{right:auto;left:0}.dropdown-menu-lg-right{right:0;left:auto}}@media (min-width: 1200px){.dropdown-menu-xl-left{right:auto;left:0}.dropdown-menu-xl-right{right:0;left:auto}}.dropup .dropdown-menu{top:auto;bottom:100%;margin-top:0;margin-bottom:.125rem}.dropup .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:0;border-right:.3em solid transparent;border-bottom:.3em solid;border-left:.3em solid transparent}.dropup .dropdown-toggle:empty::after{margin-left:0}.dropright .dropdown-menu{top:0;right:auto;left:100%;margin-top:0;margin-left:.125rem}.dropright .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:0;border-bottom:.3em solid transparent;border-left:.3em solid}.dropright .dropdown-toggle:empty::after{margin-left:0}.dropright .dropdown-toggle::after{vertical-align:0}.dropleft .dropdown-menu{top:0;right:100%;left:auto;margin-top:0;margin-right:.125rem}.dropleft .dropdown-toggle::after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:""}.dropleft .dropdown-toggle::after{display:none}.dropleft .dropdown-toggle::before{display:inline-block;margin-right:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:.3em solid;border-bottom:.3em solid transparent}.dropleft .dropdown-toggle:empty::after{margin-left:0}.dropleft .dropdown-toggle::before{vertical-align:0}.dropdown-menu[x-placement^="top"],.dropdown-menu[x-placement^="right"],.dropdown-menu[x-placement^="bottom"],.dropdown-menu[x-placement^="left"]{right:auto;bottom:auto}.dropdown-divider{height:0;margin:.5rem 0;overflow:hidden;border-top:1px solid #e9ecef}.dropdown-item{display:block;width:100%;padding:.25rem 1.5rem;clear:both;font-weight:400;color:#212529;text-align:inherit;white-space:nowrap;background-color:transparent;border:0}.dropdown-item:hover,.dropdown-item:focus{color:#16181b;text-decoration:none;background-color:#f8f9fa}.dropdown-item.active,.dropdown-item:active{color:#fff;text-decoration:none;background-color:#007bff}.dropdown-item.disabled,.dropdown-item:disabled{color:#6c757d;pointer-events:none;background-color:transparent}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:.5rem 1.5rem;margin-bottom:0;font-size:.875rem;color:#6c757d;white-space:nowrap}.dropdown-item-text{display:block;padding:.25rem 1.5rem;color:#212529}.btn-group,.btn-group-vertical{position:relative;display:inline-flex;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;flex:1 1 auto}.btn-group>.btn:hover,.btn-group-vertical>.btn:hover{z-index:1}.btn-group>.btn:focus,.btn-group>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn.active{z-index:1}.btn-toolbar{display:flex;flex-wrap:wrap;justify-content:flex-start}.btn-toolbar 
.input-group{width:auto}.btn-group>.btn:not(:first-child),.btn-group>.btn-group:not(:first-child){margin-left:-1px}.btn-group>.btn:not(:last-child):not(.dropdown-toggle),.btn-group>.btn-group:not(:last-child)>.btn{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:not(:first-child),.btn-group>.btn-group:not(:first-child)>.btn{border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-right:.5625rem;padding-left:.5625rem}.dropdown-toggle-split::after,.dropup .dropdown-toggle-split::after,.dropright .dropdown-toggle-split::after{margin-left:0}.dropleft .dropdown-toggle-split::before{margin-right:0}.btn-sm+.dropdown-toggle-split,.btn-group-sm>.btn+.dropdown-toggle-split{padding-right:.375rem;padding-left:.375rem}.btn-lg+.dropdown-toggle-split,.btn-group-lg>.btn+.dropdown-toggle-split{padding-right:.75rem;padding-left:.75rem}.btn-group-vertical{flex-direction:column;align-items:flex-start;justify-content:center}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{width:100%}.btn-group-vertical>.btn:not(:first-child),.btn-group-vertical>.btn-group:not(:first-child){margin-top:-1px}.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle),.btn-group-vertical>.btn-group:not(:last-child)>.btn{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:not(:first-child),.btn-group-vertical>.btn-group:not(:first-child)>.btn{border-top-left-radius:0;border-top-right-radius:0}.btn-group-toggle>.btn,.btn-group-toggle>.btn-group>.btn{margin-bottom:0}.btn-group-toggle>.btn input[type="radio"],.btn-group-toggle>.btn input[type="checkbox"],.btn-group-toggle>.btn-group>.btn input[type="radio"],.btn-group-toggle>.btn-group>.btn input[type="checkbox"]{position:absolute;clip:rect(0, 0, 0, 0);pointer-events:none}.input-group{position:relative;display:flex;flex-wrap:wrap;align-items:stretch;width:100%}.input-group>.form-control,.input-group>.form-control-plaintext,.input-group>.custom-select,.input-group>.custom-file{position:relative;flex:1 1 auto;width:1%;margin-bottom:0}.input-group>.form-control+.form-control,.input-group>.form-control+.custom-select,.input-group>.form-control+.custom-file,.input-group>.form-control-plaintext+.form-control,.input-group>.form-control-plaintext+.custom-select,.input-group>.form-control-plaintext+.custom-file,.input-group>.custom-select+.form-control,.input-group>.custom-select+.custom-select,.input-group>.custom-select+.custom-file,.input-group>.custom-file+.form-control,.input-group>.custom-file+.custom-select,.input-group>.custom-file+.custom-file{margin-left:-1px}.input-group>.form-control:focus,.input-group>.custom-select:focus,.input-group>.custom-file .custom-file-input:focus ~ .custom-file-label{z-index:3}.input-group>.custom-file .custom-file-input:focus{z-index:4}.input-group>.form-control:not(:last-child),.input-group>.custom-select:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.form-control:not(:first-child),.input-group>.custom-select:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.input-group>.custom-file{display:flex;align-items:center}.input-group>.custom-file:not(:last-child) .custom-file-label,.input-group>.custom-file:not(:last-child) .custom-file-label::after{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.custom-file:not(:first-child) .custom-file-label{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-prepend,.input-group-append{display:flex}.input-group-prepend 
.btn,.input-group-append .btn{position:relative;z-index:2}.input-group-prepend .btn:focus,.input-group-append .btn:focus{z-index:3}.input-group-prepend .btn+.btn,.input-group-prepend .btn+.input-group-text,.input-group-prepend .input-group-text+.input-group-text,.input-group-prepend .input-group-text+.btn,.input-group-append .btn+.btn,.input-group-append .btn+.input-group-text,.input-group-append .input-group-text+.input-group-text,.input-group-append .input-group-text+.btn{margin-left:-1px}.input-group-prepend{margin-right:-1px}.input-group-append{margin-left:-1px}.input-group-text{display:flex;align-items:center;padding:.375rem .75rem;margin-bottom:0;font-size:1rem;font-weight:400;line-height:1.5;color:#495057;text-align:center;white-space:nowrap;background-color:#e9ecef;border:1px solid #ced4da;border-radius:.25rem}.input-group-text input[type="radio"],.input-group-text input[type="checkbox"]{margin-top:0}.input-group-lg>.form-control:not(textarea),.input-group-lg>.custom-select{height:calc(1.5em + 1rem + 2px)}.input-group-lg>.form-control,.input-group-lg>.custom-select,.input-group-lg>.input-group-prepend>.input-group-text,.input-group-lg>.input-group-append>.input-group-text,.input-group-lg>.input-group-prepend>.btn,.input-group-lg>.input-group-append>.btn{padding:.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:.3rem}.input-group-sm>.form-control:not(textarea),.input-group-sm>.custom-select{height:calc(1.5em + .5rem + 2px)}.input-group-sm>.form-control,.input-group-sm>.custom-select,.input-group-sm>.input-group-prepend>.input-group-text,.input-group-sm>.input-group-append>.input-group-text,.input-group-sm>.input-group-prepend>.btn,.input-group-sm>.input-group-append>.btn{padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:.2rem}.input-group-lg>.custom-select,.input-group-sm>.custom-select{padding-right:1.75rem}.input-group>.input-group-prepend>.btn,.input-group>.input-group-prepend>.input-group-text,.input-group>.input-group-append:not(:last-child)>.btn,.input-group>.input-group-append:not(:last-child)>.input-group-text,.input-group>.input-group-append:last-child>.btn:not(:last-child):not(.dropdown-toggle),.input-group>.input-group-append:last-child>.input-group-text:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.input-group-append>.btn,.input-group>.input-group-append>.input-group-text,.input-group>.input-group-prepend:not(:first-child)>.btn,.input-group>.input-group-prepend:not(:first-child)>.input-group-text,.input-group>.input-group-prepend:first-child>.btn:not(:first-child),.input-group>.input-group-prepend:first-child>.input-group-text:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.custom-control{position:relative;display:block;min-height:1.5rem;padding-left:1.5rem}.custom-control-inline{display:inline-flex;margin-right:1rem}.custom-control-input{position:absolute;z-index:-1;opacity:0}.custom-control-input:checked ~ .custom-control-label::before{color:#fff;border-color:#007bff;background-color:#007bff}.custom-control-input:focus ~ .custom-control-label::before{box-shadow:0 0 0 .2rem rgba(0,123,255,0.25)}.custom-control-input:focus:not(:checked) ~ .custom-control-label::before{border-color:#80bdff}.custom-control-input:not(:disabled):active ~ .custom-control-label::before{color:#fff;background-color:#b3d7ff;border-color:#b3d7ff}.custom-control-input:disabled ~ .custom-control-label{color:#6c757d}.custom-control-input:disabled ~ 
.custom-control-label::before{background-color:#e9ecef}.custom-control-label{position:relative;margin-bottom:0;vertical-align:top}.custom-control-label::before{position:absolute;top:.25rem;left:-1.5rem;display:block;width:1rem;height:1rem;pointer-events:none;content:"";background-color:#fff;border:#adb5bd solid 1px}.custom-control-label::after{position:absolute;top:.25rem;left:-1.5rem;display:block;width:1rem;height:1rem;content:"";background:no-repeat 50% / 50% 50%}.custom-checkbox .custom-control-label::before{border-radius:.25rem}.custom-checkbox .custom-control-input:checked ~ .custom-control-label::after{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3e%3cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26 2.974 7.25 8 2.193z'/%3e%3c/svg%3e")}.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::before{border-color:#007bff;background-color:#007bff}.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::after{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 4'%3e%3cpath stroke='%23fff' d='M0 2h4'/%3e%3c/svg%3e")}.custom-checkbox .custom-control-input:disabled:checked ~ .custom-control-label::before{background-color:rgba(0,123,255,0.5)}.custom-checkbox .custom-control-input:disabled:indeterminate ~ .custom-control-label::before{background-color:rgba(0,123,255,0.5)}.custom-radio .custom-control-label::before{border-radius:50%}.custom-radio .custom-control-input:checked ~ .custom-control-label::after{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3e%3ccircle r='3' fill='%23fff'/%3e%3c/svg%3e")}.custom-radio .custom-control-input:disabled:checked ~ .custom-control-label::before{background-color:rgba(0,123,255,0.5)}.custom-switch{padding-left:2.25rem}.custom-switch .custom-control-label::before{left:-2.25rem;width:1.75rem;pointer-events:all;border-radius:.5rem}.custom-switch .custom-control-label::after{top:calc(.25rem + 2px);left:calc(-2.25rem + 2px);width:calc(1rem - 4px);height:calc(1rem - 4px);background-color:#adb5bd;border-radius:.5rem;transition:transform 0.15s ease-in-out,background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out}@media (prefers-reduced-motion: reduce){.custom-switch .custom-control-label::after{transition:none}}.custom-switch .custom-control-input:checked ~ .custom-control-label::after{background-color:#fff;transform:translateX(.75rem)}.custom-switch .custom-control-input:disabled:checked ~ .custom-control-label::before{background-color:rgba(0,123,255,0.5)}.custom-select{display:inline-block;width:100%;height:calc(1.5em + .75rem + 2px);padding:.375rem 1.75rem .375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#495057;vertical-align:middle;background:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3e%3cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3e%3c/svg%3e") no-repeat right .75rem center/8px 10px;background-color:#fff;border:1px solid #ced4da;border-radius:.25rem;-webkit-appearance:none;-moz-appearance:none;appearance:none}.custom-select:focus{border-color:#80bdff;outline:0;box-shadow:0 0 0 .2rem 
rgba(0,123,255,0.25)}.custom-select:focus::-ms-value{color:#495057;background-color:#fff}.custom-select[multiple],.custom-select[size]:not([size="1"]){height:auto;padding-right:.75rem;background-image:none}.custom-select:disabled{color:#6c757d;background-color:#e9ecef}.custom-select::-ms-expand{display:none}.custom-select-sm{height:calc(1.5em + .5rem + 2px);padding-top:.25rem;padding-bottom:.25rem;padding-left:.5rem;font-size:.875rem}.custom-select-lg{height:calc(1.5em + 1rem + 2px);padding-top:.5rem;padding-bottom:.5rem;padding-left:1rem;font-size:1.25rem}.custom-file{position:relative;display:inline-block;width:100%;height:calc(1.5em + .75rem + 2px);margin-bottom:0}.custom-file-input{position:relative;z-index:2;width:100%;height:calc(1.5em + .75rem + 2px);margin:0;opacity:0}.custom-file-input:focus ~ .custom-file-label{border-color:#80bdff;box-shadow:0 0 0 .2rem rgba(0,123,255,0.25)}.custom-file-input:disabled ~ .custom-file-label{background-color:#e9ecef}.custom-file-input:lang(en) ~ .custom-file-label::after{content:"Browse"}.custom-file-input ~ .custom-file-label[data-browse]::after{content:attr(data-browse)}.custom-file-label{position:absolute;top:0;right:0;left:0;z-index:1;height:calc(1.5em + .75rem + 2px);padding:.375rem .75rem;font-weight:400;line-height:1.5;color:#495057;background-color:#fff;border:1px solid #ced4da;border-radius:.25rem}.custom-file-label::after{position:absolute;top:0;right:0;bottom:0;z-index:3;display:block;height:calc(1.5em + .75rem);padding:.375rem .75rem;line-height:1.5;color:#495057;content:"Browse";background-color:#e9ecef;border-left:inherit;border-radius:0 .25rem .25rem 0}.custom-range{width:100%;height:calc(1rem + .4rem);padding:0;background-color:transparent;-webkit-appearance:none;-moz-appearance:none;appearance:none}.custom-range:focus{outline:none}.custom-range:focus::-webkit-slider-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(0,123,255,0.25)}.custom-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(0,123,255,0.25)}.custom-range:focus::-ms-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(0,123,255,0.25)}.custom-range::-moz-focus-outer{border:0}.custom-range::-webkit-slider-thumb{width:1rem;height:1rem;margin-top:-.25rem;background-color:#007bff;border:0;border-radius:1rem;-webkit-transition:background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out;-webkit-appearance:none;appearance:none}@media (prefers-reduced-motion: reduce){.custom-range::-webkit-slider-thumb{-webkit-transition:none;transition:none}}.custom-range::-webkit-slider-thumb:active{background-color:#b3d7ff}.custom-range::-webkit-slider-runnable-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.custom-range::-moz-range-thumb{width:1rem;height:1rem;background-color:#007bff;border:0;border-radius:1rem;-moz-transition:background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out;-moz-appearance:none;appearance:none}@media (prefers-reduced-motion: 
reduce){.custom-range::-moz-range-thumb{-moz-transition:none;transition:none}}.custom-range::-moz-range-thumb:active{background-color:#b3d7ff}.custom-range::-moz-range-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.custom-range::-ms-thumb{width:1rem;height:1rem;margin-top:0;margin-right:.2rem;margin-left:.2rem;background-color:#007bff;border:0;border-radius:1rem;-ms-transition:background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out;appearance:none}@media (prefers-reduced-motion: reduce){.custom-range::-ms-thumb{-ms-transition:none;transition:none}}.custom-range::-ms-thumb:active{background-color:#b3d7ff}.custom-range::-ms-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:transparent;border-color:transparent;border-width:.5rem}.custom-range::-ms-fill-lower{background-color:#dee2e6;border-radius:1rem}.custom-range::-ms-fill-upper{margin-right:15px;background-color:#dee2e6;border-radius:1rem}.custom-range:disabled::-webkit-slider-thumb{background-color:#adb5bd}.custom-range:disabled::-webkit-slider-runnable-track{cursor:default}.custom-range:disabled::-moz-range-thumb{background-color:#adb5bd}.custom-range:disabled::-moz-range-track{cursor:default}.custom-range:disabled::-ms-thumb{background-color:#adb5bd}.custom-control-label::before,.custom-file-label,.custom-select{transition:background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out}@media (prefers-reduced-motion: reduce){.custom-control-label::before,.custom-file-label,.custom-select{transition:none}}.nav{display:flex;flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:.5rem 1rem}.nav-link:hover,.nav-link:focus{text-decoration:none}.nav-link.disabled{color:#6c757d;pointer-events:none;cursor:default}.nav-tabs{border-bottom:1px solid #dee2e6}.nav-tabs .nav-item{margin-bottom:-1px}.nav-tabs .nav-link{border:1px solid transparent;border-top-left-radius:.25rem;border-top-right-radius:.25rem}.nav-tabs .nav-link:hover,.nav-tabs .nav-link:focus{border-color:#e9ecef #e9ecef #dee2e6}.nav-tabs .nav-link.disabled{color:#6c757d;background-color:transparent;border-color:transparent}.nav-tabs .nav-link.active,.nav-tabs .nav-item.show .nav-link{color:#495057;background-color:#fff;border-color:#dee2e6 #dee2e6 #fff}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.nav-pills .nav-link{border-radius:.25rem}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:#fff;background-color:#007bff}.nav-fill .nav-item{flex:1 1 auto;text-align:center}.nav-justified .nav-item{flex-basis:0;flex-grow:1;text-align:center}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{position:relative;display:flex;flex-wrap:wrap;align-items:center;justify-content:space-between;padding:.5rem 1rem}.navbar>.container,.navbar>.container-fluid{display:flex;flex-wrap:wrap;align-items:center;justify-content:space-between}.navbar-brand{display:inline-block;padding-top:.3125rem;padding-bottom:.3125rem;margin-right:1rem;font-size:1.25rem;line-height:inherit;white-space:nowrap}.navbar-brand:hover,.navbar-brand:focus{text-decoration:none}.navbar-nav{display:flex;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav 
.nav-link{padding-right:0;padding-left:0}.navbar-nav .dropdown-menu{position:static;float:none}.navbar-text{display:inline-block;padding-top:.5rem;padding-bottom:.5rem}.navbar-collapse{flex-basis:100%;flex-grow:1;align-items:center}.navbar-toggler{padding:.25rem .75rem;font-size:1.25rem;line-height:1;background-color:transparent;border:1px solid transparent;border-radius:.25rem}.navbar-toggler:hover,.navbar-toggler:focus{text-decoration:none}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;content:"";background:no-repeat center center;background-size:100% 100%}@media (max-width: 575.98px){.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 576px){.navbar-expand-sm{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-sm .navbar-nav{flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid{flex-wrap:nowrap}.navbar-expand-sm .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand-sm .navbar-toggler{display:none}}@media (max-width: 767.98px){.navbar-expand-md>.container,.navbar-expand-md>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 768px){.navbar-expand-md{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-md .navbar-nav{flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-md>.container,.navbar-expand-md>.container-fluid{flex-wrap:nowrap}.navbar-expand-md .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand-md .navbar-toggler{display:none}}@media (max-width: 991.98px){.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 992px){.navbar-expand-lg{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-lg .navbar-nav{flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid{flex-wrap:nowrap}.navbar-expand-lg .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand-lg .navbar-toggler{display:none}}@media (max-width: 1199.98px){.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 1200px){.navbar-expand-xl{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-xl .navbar-nav{flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid{flex-wrap:nowrap}.navbar-expand-xl .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand-xl .navbar-toggler{display:none}}.navbar-expand{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand>.container,.navbar-expand>.container-fluid{padding-right:0;padding-left:0}.navbar-expand .navbar-nav{flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand>.container,.navbar-expand>.container-fluid{flex-wrap:nowrap}.navbar-expand .navbar-collapse{display:flex !important;flex-basis:auto}.navbar-expand 
.navbar-toggler{display:none}.navbar-light .navbar-brand{color:rgba(0,0,0,0.9)}.navbar-light .navbar-brand:hover,.navbar-light .navbar-brand:focus{color:rgba(0,0,0,0.9)}.navbar-light .navbar-nav .nav-link{color:rgba(0,0,0,0.5)}.navbar-light .navbar-nav .nav-link:hover,.navbar-light .navbar-nav .nav-link:focus{color:rgba(0,0,0,0.7)}.navbar-light .navbar-nav .nav-link.disabled{color:rgba(0,0,0,0.3)}.navbar-light .navbar-nav .show>.nav-link,.navbar-light .navbar-nav .active>.nav-link,.navbar-light .navbar-nav .nav-link.show,.navbar-light .navbar-nav .nav-link.active{color:rgba(0,0,0,0.9)}.navbar-light .navbar-toggler{color:rgba(0,0,0,0.5);border-color:rgba(0,0,0,0.1)}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml,%3csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3e%3cpath stroke='rgba(0,0,0,0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e")}.navbar-light .navbar-text{color:rgba(0,0,0,0.5)}.navbar-light .navbar-text a{color:rgba(0,0,0,0.9)}.navbar-light .navbar-text a:hover,.navbar-light .navbar-text a:focus{color:rgba(0,0,0,0.9)}.navbar-dark .navbar-brand{color:#fff}.navbar-dark .navbar-brand:hover,.navbar-dark .navbar-brand:focus{color:#fff}.navbar-dark .navbar-nav .nav-link{color:rgba(255,255,255,0.5)}.navbar-dark .navbar-nav .nav-link:hover,.navbar-dark .navbar-nav .nav-link:focus{color:rgba(255,255,255,0.75)}.navbar-dark .navbar-nav .nav-link.disabled{color:rgba(255,255,255,0.25)}.navbar-dark .navbar-nav .show>.nav-link,.navbar-dark .navbar-nav .active>.nav-link,.navbar-dark .navbar-nav .nav-link.show,.navbar-dark .navbar-nav .nav-link.active{color:#fff}.navbar-dark .navbar-toggler{color:rgba(255,255,255,0.5);border-color:rgba(255,255,255,0.1)}.navbar-dark .navbar-toggler-icon{background-image:url("data:image/svg+xml,%3csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3e%3cpath stroke='rgba(255,255,255,0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3e%3c/svg%3e")}.navbar-dark .navbar-text{color:rgba(255,255,255,0.5)}.navbar-dark .navbar-text a{color:#fff}.navbar-dark .navbar-text a:hover,.navbar-dark .navbar-text a:focus{color:#fff}.card{position:relative;display:flex;flex-direction:column;min-width:0;word-wrap:break-word;background-color:#fff;background-clip:border-box;border:1px solid rgba(0,0,0,0.125);border-radius:.25rem}.card>hr{margin-right:0;margin-left:0}.card>.list-group:first-child .list-group-item:first-child{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.card>.list-group:last-child .list-group-item:last-child{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.card-body{flex:1 1 auto;padding:1.25rem}.card-title{margin-bottom:.75rem}.card-subtitle{margin-top:-.375rem;margin-bottom:0}.card-text:last-child{margin-bottom:0}.card-link:hover{text-decoration:none}.card-link+.card-link{margin-left:1.25rem}.card-header{padding:.75rem 1.25rem;margin-bottom:0;background-color:rgba(0,0,0,0.03);border-bottom:1px solid rgba(0,0,0,0.125)}.card-header:first-child{border-radius:calc(.25rem - 1px) calc(.25rem - 1px) 0 0}.card-header+.list-group .list-group-item:first-child{border-top:0}.card-footer{padding:.75rem 1.25rem;background-color:rgba(0,0,0,0.03);border-top:1px solid rgba(0,0,0,0.125)}.card-footer:last-child{border-radius:0 0 calc(.25rem - 1px) calc(.25rem - 
1px)}.card-header-tabs{margin-right:-.625rem;margin-bottom:-0.75rem;margin-left:-.625rem;border-bottom:0}.card-header-pills{margin-right:-.625rem;margin-left:-.625rem}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1.25rem}.card-img{width:100%;border-radius:calc(.25rem - 1px)}.card-img-top{width:100%;border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card-img-bottom{width:100%;border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.card-deck{display:flex;flex-direction:column}.card-deck .card{margin-bottom:15px}@media (min-width: 576px){.card-deck{flex-flow:row wrap;margin-right:-15px;margin-left:-15px}.card-deck .card{display:flex;flex:1 0 0%;flex-direction:column;margin-right:15px;margin-bottom:0;margin-left:15px}}.card-group{display:flex;flex-direction:column}.card-group>.card{margin-bottom:15px}@media (min-width: 576px){.card-group{flex-flow:row wrap}.card-group>.card{flex:1 0 0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}.card-group>.card:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:not(:last-child) .card-img-top,.card-group>.card:not(:last-child) .card-header{border-top-right-radius:0}.card-group>.card:not(:last-child) .card-img-bottom,.card-group>.card:not(:last-child) .card-footer{border-bottom-right-radius:0}.card-group>.card:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:not(:first-child) .card-img-top,.card-group>.card:not(:first-child) .card-header{border-top-left-radius:0}.card-group>.card:not(:first-child) .card-img-bottom,.card-group>.card:not(:first-child) .card-footer{border-bottom-left-radius:0}}.card-columns .card{margin-bottom:.75rem}@media (min-width: 576px){.card-columns{-moz-column-count:3;column-count:3;-moz-column-gap:1.25rem;column-gap:1.25rem;orphans:1;widows:1}.card-columns .card{display:inline-block;width:100%}}.accordion>.card{overflow:hidden}.accordion>.card:not(:first-of-type) .card-header:first-child{border-radius:0}.accordion>.card:not(:first-of-type):not(:last-of-type){border-bottom:0;border-radius:0}.accordion>.card:first-of-type{border-bottom:0;border-bottom-right-radius:0;border-bottom-left-radius:0}.accordion>.card:last-of-type{border-top-left-radius:0;border-top-right-radius:0}.accordion>.card .card-header{margin-bottom:-1px}.breadcrumb{display:flex;flex-wrap:wrap;padding:.75rem 1rem;margin-bottom:1rem;list-style:none;background-color:#e9ecef;border-radius:.25rem}.breadcrumb-item+.breadcrumb-item{padding-left:.5rem}.breadcrumb-item+.breadcrumb-item::before{display:inline-block;padding-right:.5rem;color:#6c757d;content:"/"}.breadcrumb-item+.breadcrumb-item:hover::before{text-decoration:underline}.breadcrumb-item+.breadcrumb-item:hover::before{text-decoration:none}.breadcrumb-item.active{color:#6c757d}.pagination{display:flex;padding-left:0;list-style:none;border-radius:.25rem}.page-link{position:relative;display:block;padding:.5rem .75rem;margin-left:-1px;line-height:1.25;color:#007bff;background-color:#fff;border:1px solid #dee2e6}.page-link:hover{z-index:2;color:#0056b3;text-decoration:none;background-color:#e9ecef;border-color:#dee2e6}.page-link:focus{z-index:2;outline:0;box-shadow:0 0 0 .2rem rgba(0,123,255,0.25)}.page-item:first-child .page-link{margin-left:0;border-top-left-radius:.25rem;border-bottom-left-radius:.25rem}.page-item:last-child .page-link{border-top-right-radius:.25rem;border-bottom-right-radius:.25rem}.page-item.active 
.page-link{z-index:1;color:#fff;background-color:#007bff;border-color:#007bff}.page-item.disabled .page-link{color:#6c757d;pointer-events:none;cursor:auto;background-color:#fff;border-color:#dee2e6}.pagination-lg .page-link{padding:.75rem 1.5rem;font-size:1.25rem;line-height:1.5}.pagination-lg .page-item:first-child .page-link{border-top-left-radius:.3rem;border-bottom-left-radius:.3rem}.pagination-lg .page-item:last-child .page-link{border-top-right-radius:.3rem;border-bottom-right-radius:.3rem}.pagination-sm .page-link{padding:.25rem .5rem;font-size:.875rem;line-height:1.5}.pagination-sm .page-item:first-child .page-link{border-top-left-radius:.2rem;border-bottom-left-radius:.2rem}.pagination-sm .page-item:last-child .page-link{border-top-right-radius:.2rem;border-bottom-right-radius:.2rem}.badge{display:inline-block;padding:.25em .4em;font-size:75%;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem;transition:color 0.15s ease-in-out,background-color 0.15s ease-in-out,border-color 0.15s ease-in-out,box-shadow 0.15s ease-in-out}@media (prefers-reduced-motion: reduce){.badge{transition:none}}a.badge:hover,a.badge:focus{text-decoration:none}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.badge-pill{padding-right:.6em;padding-left:.6em;border-radius:10rem}.badge-primary{color:#fff;background-color:#007bff}a.badge-primary:hover,a.badge-primary:focus{color:#fff;background-color:#0062cc}a.badge-primary:focus,a.badge-primary.focus{outline:0;box-shadow:0 0 0 .2rem rgba(0,123,255,0.5)}.badge-secondary{color:#fff;background-color:#6c757d}a.badge-secondary:hover,a.badge-secondary:focus{color:#fff;background-color:#545b62}a.badge-secondary:focus,a.badge-secondary.focus{outline:0;box-shadow:0 0 0 .2rem rgba(108,117,125,0.5)}.badge-success{color:#fff;background-color:#28a745}a.badge-success:hover,a.badge-success:focus{color:#fff;background-color:#1e7e34}a.badge-success:focus,a.badge-success.focus{outline:0;box-shadow:0 0 0 .2rem rgba(40,167,69,0.5)}.badge-info{color:#fff;background-color:#17a2b8}a.badge-info:hover,a.badge-info:focus{color:#fff;background-color:#117a8b}a.badge-info:focus,a.badge-info.focus{outline:0;box-shadow:0 0 0 .2rem rgba(23,162,184,0.5)}.badge-warning{color:#212529;background-color:#ffc107}a.badge-warning:hover,a.badge-warning:focus{color:#212529;background-color:#d39e00}a.badge-warning:focus,a.badge-warning.focus{outline:0;box-shadow:0 0 0 .2rem rgba(255,193,7,0.5)}.badge-danger{color:#fff;background-color:#dc3545}a.badge-danger:hover,a.badge-danger:focus{color:#fff;background-color:#bd2130}a.badge-danger:focus,a.badge-danger.focus{outline:0;box-shadow:0 0 0 .2rem rgba(220,53,69,0.5)}.badge-light{color:#212529;background-color:#f8f9fa}a.badge-light:hover,a.badge-light:focus{color:#212529;background-color:#dae0e5}a.badge-light:focus,a.badge-light.focus{outline:0;box-shadow:0 0 0 .2rem rgba(248,249,250,0.5)}.badge-dark{color:#fff;background-color:#343a40}a.badge-dark:hover,a.badge-dark:focus{color:#fff;background-color:#1d2124}a.badge-dark:focus,a.badge-dark.focus{outline:0;box-shadow:0 0 0 .2rem rgba(52,58,64,0.5)}.jumbotron{padding:2rem 1rem;margin-bottom:2rem;background-color:#e9ecef;border-radius:.3rem}@media (min-width: 576px){.jumbotron{padding:4rem 2rem}}.jumbotron-fluid{padding-right:0;padding-left:0;border-radius:0}.alert{position:relative;padding:.75rem 1.25rem;margin-bottom:1rem;border:1px solid 
transparent;border-radius:.25rem}.alert-heading{color:inherit}.alert-link{font-weight:700}.alert-dismissible{padding-right:4rem}.alert-dismissible .close{position:absolute;top:0;right:0;padding:.75rem 1.25rem;color:inherit}.alert-primary{color:#004085;background-color:#cce5ff;border-color:#b8daff}.alert-primary hr{border-top-color:#9fcdff}.alert-primary .alert-link{color:#002752}.alert-secondary{color:#383d41;background-color:#e2e3e5;border-color:#d6d8db}.alert-secondary hr{border-top-color:#c8cbcf}.alert-secondary .alert-link{color:#202326}.alert-success{color:#155724;background-color:#d4edda;border-color:#c3e6cb}.alert-success hr{border-top-color:#b1dfbb}.alert-success .alert-link{color:#0b2e13}.alert-info{color:#0c5460;background-color:#d1ecf1;border-color:#bee5eb}.alert-info hr{border-top-color:#abdde5}.alert-info .alert-link{color:#062c33}.alert-warning{color:#856404;background-color:#fff3cd;border-color:#ffeeba}.alert-warning hr{border-top-color:#ffe8a1}.alert-warning .alert-link{color:#533f03}.alert-danger{color:#721c24;background-color:#f8d7da;border-color:#f5c6cb}.alert-danger hr{border-top-color:#f1b0b7}.alert-danger .alert-link{color:#491217}.alert-light{color:#818182;background-color:#fefefe;border-color:#fdfdfe}.alert-light hr{border-top-color:#ececf6}.alert-light .alert-link{color:#686868}.alert-dark{color:#1b1e21;background-color:#d6d8d9;border-color:#c6c8ca}.alert-dark hr{border-top-color:#b9bbbe}.alert-dark .alert-link{color:#040505}@-webkit-keyframes progress-bar-stripes{from{background-position:1rem 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:1rem 0}to{background-position:0 0}}.progress{display:flex;height:1rem;overflow:hidden;font-size:.75rem;background-color:#e9ecef;border-radius:.25rem}.progress-bar{display:flex;flex-direction:column;justify-content:center;color:#fff;text-align:center;white-space:nowrap;background-color:#007bff;transition:width 0.6s ease}@media (prefers-reduced-motion: reduce){.progress-bar{transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-size:1rem 1rem}.progress-bar-animated{-webkit-animation:progress-bar-stripes 1s linear infinite;animation:progress-bar-stripes 1s linear infinite}@media (prefers-reduced-motion: reduce){.progress-bar-animated{-webkit-animation:none;animation:none}}.media{display:flex;align-items:flex-start}.media-body{flex:1}.list-group{display:flex;flex-direction:column;padding-left:0;margin-bottom:0}.list-group-item-action{width:100%;color:#495057;text-align:inherit}.list-group-item-action:hover,.list-group-item-action:focus{z-index:1;color:#495057;text-decoration:none;background-color:#f8f9fa}.list-group-item-action:active{color:#212529;background-color:#e9ecef}.list-group-item{position:relative;display:block;padding:.75rem 1.25rem;margin-bottom:-1px;background-color:#fff;border:1px solid rgba(0,0,0,0.125)}.list-group-item:first-child{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.list-group-item.disabled,.list-group-item:disabled{color:#6c757d;pointer-events:none;background-color:#fff}.list-group-item.active{z-index:2;color:#fff;background-color:#007bff;border-color:#007bff}.list-group-horizontal{flex-direction:row}.list-group-horizontal 
.list-group-item{margin-right:-1px;margin-bottom:0}.list-group-horizontal .list-group-item:first-child{border-top-left-radius:.25rem;border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal .list-group-item:last-child{margin-right:0;border-top-right-radius:.25rem;border-bottom-right-radius:.25rem;border-bottom-left-radius:0}@media (min-width: 576px){.list-group-horizontal-sm{flex-direction:row}.list-group-horizontal-sm .list-group-item{margin-right:-1px;margin-bottom:0}.list-group-horizontal-sm .list-group-item:first-child{border-top-left-radius:.25rem;border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-sm .list-group-item:last-child{margin-right:0;border-top-right-radius:.25rem;border-bottom-right-radius:.25rem;border-bottom-left-radius:0}}@media (min-width: 768px){.list-group-horizontal-md{flex-direction:row}.list-group-horizontal-md .list-group-item{margin-right:-1px;margin-bottom:0}.list-group-horizontal-md .list-group-item:first-child{border-top-left-radius:.25rem;border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-md .list-group-item:last-child{margin-right:0;border-top-right-radius:.25rem;border-bottom-right-radius:.25rem;border-bottom-left-radius:0}}@media (min-width: 992px){.list-group-horizontal-lg{flex-direction:row}.list-group-horizontal-lg .list-group-item{margin-right:-1px;margin-bottom:0}.list-group-horizontal-lg .list-group-item:first-child{border-top-left-radius:.25rem;border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-lg .list-group-item:last-child{margin-right:0;border-top-right-radius:.25rem;border-bottom-right-radius:.25rem;border-bottom-left-radius:0}}@media (min-width: 1200px){.list-group-horizontal-xl{flex-direction:row}.list-group-horizontal-xl .list-group-item{margin-right:-1px;margin-bottom:0}.list-group-horizontal-xl .list-group-item:first-child{border-top-left-radius:.25rem;border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-xl .list-group-item:last-child{margin-right:0;border-top-right-radius:.25rem;border-bottom-right-radius:.25rem;border-bottom-left-radius:0}}.list-group-flush .list-group-item{border-right:0;border-left:0;border-radius:0}.list-group-flush .list-group-item:last-child{margin-bottom:-1px}.list-group-flush:first-child .list-group-item:first-child{border-top:0}.list-group-flush:last-child 
.list-group-item:last-child{margin-bottom:0;border-bottom:0}.list-group-item-primary{color:#004085;background-color:#b8daff}.list-group-item-primary.list-group-item-action:hover,.list-group-item-primary.list-group-item-action:focus{color:#004085;background-color:#9fcdff}.list-group-item-primary.list-group-item-action.active{color:#fff;background-color:#004085;border-color:#004085}.list-group-item-secondary{color:#383d41;background-color:#d6d8db}.list-group-item-secondary.list-group-item-action:hover,.list-group-item-secondary.list-group-item-action:focus{color:#383d41;background-color:#c8cbcf}.list-group-item-secondary.list-group-item-action.active{color:#fff;background-color:#383d41;border-color:#383d41}.list-group-item-success{color:#155724;background-color:#c3e6cb}.list-group-item-success.list-group-item-action:hover,.list-group-item-success.list-group-item-action:focus{color:#155724;background-color:#b1dfbb}.list-group-item-success.list-group-item-action.active{color:#fff;background-color:#155724;border-color:#155724}.list-group-item-info{color:#0c5460;background-color:#bee5eb}.list-group-item-info.list-group-item-action:hover,.list-group-item-info.list-group-item-action:focus{color:#0c5460;background-color:#abdde5}.list-group-item-info.list-group-item-action.active{color:#fff;background-color:#0c5460;border-color:#0c5460}.list-group-item-warning{color:#856404;background-color:#ffeeba}.list-group-item-warning.list-group-item-action:hover,.list-group-item-warning.list-group-item-action:focus{color:#856404;background-color:#ffe8a1}.list-group-item-warning.list-group-item-action.active{color:#fff;background-color:#856404;border-color:#856404}.list-group-item-danger{color:#721c24;background-color:#f5c6cb}.list-group-item-danger.list-group-item-action:hover,.list-group-item-danger.list-group-item-action:focus{color:#721c24;background-color:#f1b0b7}.list-group-item-danger.list-group-item-action.active{color:#fff;background-color:#721c24;border-color:#721c24}.list-group-item-light{color:#818182;background-color:#fdfdfe}.list-group-item-light.list-group-item-action:hover,.list-group-item-light.list-group-item-action:focus{color:#818182;background-color:#ececf6}.list-group-item-light.list-group-item-action.active{color:#fff;background-color:#818182;border-color:#818182}.list-group-item-dark{color:#1b1e21;background-color:#c6c8ca}.list-group-item-dark.list-group-item-action:hover,.list-group-item-dark.list-group-item-action:focus{color:#1b1e21;background-color:#b9bbbe}.list-group-item-dark.list-group-item-action.active{color:#fff;background-color:#1b1e21;border-color:#1b1e21}.close{float:right;font-size:1.5rem;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;opacity:.5}.close:hover{color:#000;text-decoration:none}.close:not(:disabled):not(.disabled):hover,.close:not(:disabled):not(.disabled):focus{opacity:.75}button.close{padding:0;background-color:transparent;border:0;-webkit-appearance:none;-moz-appearance:none;appearance:none}a.close.disabled{pointer-events:none}.toast{max-width:350px;overflow:hidden;font-size:.875rem;background-color:rgba(255,255,255,0.85);background-clip:padding-box;border:1px solid rgba(0,0,0,0.1);box-shadow:0 0.25rem 0.75rem rgba(0,0,0,0.1);-webkit-backdrop-filter:blur(10px);backdrop-filter:blur(10px);opacity:0;border-radius:.25rem}.toast:not(:last-child){margin-bottom:.75rem}.toast.showing{opacity:1}.toast.show{display:block;opacity:1}.toast.hide{display:none}.toast-header{display:flex;align-items:center;padding:.25rem 
.75rem;color:#6c757d;background-color:rgba(255,255,255,0.85);background-clip:padding-box;border-bottom:1px solid rgba(0,0,0,0.05)}.toast-body{padding:.75rem}.modal-open{overflow:hidden}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal{position:fixed;top:0;left:0;z-index:1050;display:none;width:100%;height:100%;overflow:hidden;outline:0}.modal-dialog{position:relative;width:auto;margin:.5rem;pointer-events:none}.modal.fade .modal-dialog{transition:transform 0.3s ease-out;transform:translate(0, -50px)}@media (prefers-reduced-motion: reduce){.modal.fade .modal-dialog{transition:none}}.modal.show .modal-dialog{transform:none}.modal-dialog-scrollable{display:flex;max-height:calc(100% - 1rem)}.modal-dialog-scrollable .modal-content{max-height:calc(100vh - 1rem);overflow:hidden}.modal-dialog-scrollable .modal-header,.modal-dialog-scrollable .modal-footer{flex-shrink:0}.modal-dialog-scrollable .modal-body{overflow-y:auto}.modal-dialog-centered{display:flex;align-items:center;min-height:calc(100% - 1rem)}.modal-dialog-centered::before{display:block;height:calc(100vh - 1rem);content:""}.modal-dialog-centered.modal-dialog-scrollable{flex-direction:column;justify-content:center;height:100%}.modal-dialog-centered.modal-dialog-scrollable .modal-content{max-height:none}.modal-dialog-centered.modal-dialog-scrollable::before{content:none}.modal-content{position:relative;display:flex;flex-direction:column;width:100%;pointer-events:auto;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.2);border-radius:.3rem;outline:0}.modal-backdrop{position:fixed;top:0;left:0;z-index:1040;width:100vw;height:100vh;background-color:#000}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:.5}.modal-header{display:flex;align-items:flex-start;justify-content:space-between;padding:1rem 1rem;border-bottom:1px solid #dee2e6;border-top-left-radius:.3rem;border-top-right-radius:.3rem}.modal-header .close{padding:1rem 1rem;margin:-1rem -1rem -1rem auto}.modal-title{margin-bottom:0;line-height:1.5}.modal-body{position:relative;flex:1 1 auto;padding:1rem}.modal-footer{display:flex;align-items:center;justify-content:flex-end;padding:1rem;border-top:1px solid #dee2e6;border-bottom-right-radius:.3rem;border-bottom-left-radius:.3rem}.modal-footer>:not(:first-child){margin-left:.25rem}.modal-footer>:not(:last-child){margin-right:.25rem}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width: 576px){.modal-dialog{max-width:500px;margin:1.75rem auto}.modal-dialog-scrollable{max-height:calc(100% - 3.5rem)}.modal-dialog-scrollable .modal-content{max-height:calc(100vh - 3.5rem)}.modal-dialog-centered{min-height:calc(100% - 3.5rem)}.modal-dialog-centered::before{height:calc(100vh - 3.5rem)}.modal-sm{max-width:300px}}@media (min-width: 992px){.modal-lg,.modal-xl{max-width:800px}}@media (min-width: 1200px){.modal-xl{max-width:1140px}}.tooltip{position:absolute;z-index:1070;display:block;margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;opacity:0}.tooltip.show{opacity:.9}.tooltip 
.arrow{position:absolute;display:block;width:.8rem;height:.4rem}.tooltip .arrow::before{position:absolute;content:"";border-color:transparent;border-style:solid}.bs-tooltip-top,.bs-tooltip-auto[x-placement^="top"]{padding:.4rem 0}.bs-tooltip-top .arrow,.bs-tooltip-auto[x-placement^="top"] .arrow{bottom:0}.bs-tooltip-top .arrow::before,.bs-tooltip-auto[x-placement^="top"] .arrow::before{top:0;border-width:.4rem .4rem 0;border-top-color:#000}.bs-tooltip-right,.bs-tooltip-auto[x-placement^="right"]{padding:0 .4rem}.bs-tooltip-right .arrow,.bs-tooltip-auto[x-placement^="right"] .arrow{left:0;width:.4rem;height:.8rem}.bs-tooltip-right .arrow::before,.bs-tooltip-auto[x-placement^="right"] .arrow::before{right:0;border-width:.4rem .4rem .4rem 0;border-right-color:#000}.bs-tooltip-bottom,.bs-tooltip-auto[x-placement^="bottom"]{padding:.4rem 0}.bs-tooltip-bottom .arrow,.bs-tooltip-auto[x-placement^="bottom"] .arrow{top:0}.bs-tooltip-bottom .arrow::before,.bs-tooltip-auto[x-placement^="bottom"] .arrow::before{bottom:0;border-width:0 .4rem .4rem;border-bottom-color:#000}.bs-tooltip-left,.bs-tooltip-auto[x-placement^="left"]{padding:0 .4rem}.bs-tooltip-left .arrow,.bs-tooltip-auto[x-placement^="left"] .arrow{right:0;width:.4rem;height:.8rem}.bs-tooltip-left .arrow::before,.bs-tooltip-auto[x-placement^="left"] .arrow::before{left:0;border-width:.4rem 0 .4rem .4rem;border-left-color:#000}.tooltip-inner{max-width:200px;padding:.25rem .5rem;color:#fff;text-align:center;background-color:#000;border-radius:.25rem}.popover{position:absolute;top:0;left:0;z-index:1060;display:block;max-width:276px;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.2);border-radius:.3rem}.popover .arrow{position:absolute;display:block;width:1rem;height:.5rem;margin:0 .3rem}.popover .arrow::before,.popover .arrow::after{position:absolute;display:block;content:"";border-color:transparent;border-style:solid}.bs-popover-top,.bs-popover-auto[x-placement^="top"]{margin-bottom:.5rem}.bs-popover-top>.arrow,.bs-popover-auto[x-placement^="top"]>.arrow{bottom:calc((.5rem + 1px) * -1)}.bs-popover-top>.arrow::before,.bs-popover-auto[x-placement^="top"]>.arrow::before{bottom:0;border-width:.5rem .5rem 0;border-top-color:rgba(0,0,0,0.25)}.bs-popover-top>.arrow::after,.bs-popover-auto[x-placement^="top"]>.arrow::after{bottom:1px;border-width:.5rem .5rem 0;border-top-color:#fff}.bs-popover-right,.bs-popover-auto[x-placement^="right"]{margin-left:.5rem}.bs-popover-right>.arrow,.bs-popover-auto[x-placement^="right"]>.arrow{left:calc((.5rem + 1px) * -1);width:.5rem;height:1rem;margin:.3rem 0}.bs-popover-right>.arrow::before,.bs-popover-auto[x-placement^="right"]>.arrow::before{left:0;border-width:.5rem .5rem .5rem 0;border-right-color:rgba(0,0,0,0.25)}.bs-popover-right>.arrow::after,.bs-popover-auto[x-placement^="right"]>.arrow::after{left:1px;border-width:.5rem .5rem .5rem 0;border-right-color:#fff}.bs-popover-bottom,.bs-popover-auto[x-placement^="bottom"]{margin-top:.5rem}.bs-popover-bottom>.arrow,.bs-popover-auto[x-placement^="bottom"]>.arrow{top:calc((.5rem + 1px) * 
-1)}.bs-popover-bottom>.arrow::before,.bs-popover-auto[x-placement^="bottom"]>.arrow::before{top:0;border-width:0 .5rem .5rem .5rem;border-bottom-color:rgba(0,0,0,0.25)}.bs-popover-bottom>.arrow::after,.bs-popover-auto[x-placement^="bottom"]>.arrow::after{top:1px;border-width:0 .5rem .5rem .5rem;border-bottom-color:#fff}.bs-popover-bottom .popover-header::before,.bs-popover-auto[x-placement^="bottom"] .popover-header::before{position:absolute;top:0;left:50%;display:block;width:1rem;margin-left:-.5rem;content:"";border-bottom:1px solid #f7f7f7}.bs-popover-left,.bs-popover-auto[x-placement^="left"]{margin-right:.5rem}.bs-popover-left>.arrow,.bs-popover-auto[x-placement^="left"]>.arrow{right:calc((.5rem + 1px) * -1);width:.5rem;height:1rem;margin:.3rem 0}.bs-popover-left>.arrow::before,.bs-popover-auto[x-placement^="left"]>.arrow::before{right:0;border-width:.5rem 0 .5rem .5rem;border-left-color:rgba(0,0,0,0.25)}.bs-popover-left>.arrow::after,.bs-popover-auto[x-placement^="left"]>.arrow::after{right:1px;border-width:.5rem 0 .5rem .5rem;border-left-color:#fff}.popover-header{padding:.5rem .75rem;margin-bottom:0;font-size:1rem;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 1px)}.popover-header:empty{display:none}.popover-body{padding:.5rem .75rem;color:#212529}.carousel{position:relative}.carousel.pointer-event{touch-action:pan-y}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner::after{display:block;clear:both;content:""}.carousel-item{position:relative;display:none;float:left;width:100%;margin-right:-100%;-webkit-backface-visibility:hidden;backface-visibility:hidden;transition:transform .6s ease-in-out}@media (prefers-reduced-motion: reduce){.carousel-item{transition:none}}.carousel-item.active,.carousel-item-next,.carousel-item-prev{display:block}.carousel-item-next:not(.carousel-item-left),.active.carousel-item-right{transform:translateX(100%)}.carousel-item-prev:not(.carousel-item-right),.active.carousel-item-left{transform:translateX(-100%)}.carousel-fade .carousel-item{opacity:0;transition-property:opacity;transform:none}.carousel-fade .carousel-item.active,.carousel-fade .carousel-item-next.carousel-item-left,.carousel-fade .carousel-item-prev.carousel-item-right{z-index:1;opacity:1}.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-right{z-index:0;opacity:0;transition:0s .6s opacity}@media (prefers-reduced-motion: reduce){.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-right{transition:none}}.carousel-control-prev,.carousel-control-next{position:absolute;top:0;bottom:0;z-index:1;display:flex;align-items:center;justify-content:center;width:15%;color:#fff;text-align:center;opacity:.5;transition:opacity 0.15s ease}@media (prefers-reduced-motion: reduce){.carousel-control-prev,.carousel-control-next{transition:none}}.carousel-control-prev:hover,.carousel-control-prev:focus,.carousel-control-next:hover,.carousel-control-next:focus{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-prev-icon,.carousel-control-next-icon{display:inline-block;width:20px;height:20px;background:no-repeat 50% / 100% 100%}.carousel-control-prev-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3e%3cpath d='M5.25 0l-4 4 4 4 1.5-1.5-2.5-2.5 
2.5-2.5-1.5-1.5z'/%3e%3c/svg%3e")}.carousel-control-next-icon{background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3e%3cpath d='M2.75 0l-1.5 1.5 2.5 2.5-2.5 2.5 1.5 1.5 4-4-4-4z'/%3e%3c/svg%3e")}.carousel-indicators{position:absolute;right:0;bottom:0;left:0;z-index:15;display:flex;justify-content:center;padding-left:0;margin-right:15%;margin-left:15%;list-style:none}.carousel-indicators li{box-sizing:content-box;flex:0 1 auto;width:30px;height:3px;margin-right:3px;margin-left:3px;text-indent:-999px;cursor:pointer;background-color:#fff;background-clip:padding-box;border-top:10px solid transparent;border-bottom:10px solid transparent;opacity:.5;transition:opacity 0.6s ease}@media (prefers-reduced-motion: reduce){.carousel-indicators li{transition:none}}.carousel-indicators .active{opacity:1}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center}@-webkit-keyframes spinner-border{to{transform:rotate(360deg)}}@keyframes spinner-border{to{transform:rotate(360deg)}}.spinner-border{display:inline-block;width:2rem;height:2rem;vertical-align:text-bottom;border:.25em solid currentColor;border-right-color:transparent;border-radius:50%;-webkit-animation:spinner-border .75s linear infinite;animation:spinner-border .75s linear infinite}.spinner-border-sm{width:1rem;height:1rem;border-width:.2em}@-webkit-keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1}}@keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1}}.spinner-grow{display:inline-block;width:2rem;height:2rem;vertical-align:text-bottom;background-color:currentColor;border-radius:50%;opacity:0;-webkit-animation:spinner-grow .75s linear infinite;animation:spinner-grow .75s linear infinite}.spinner-grow-sm{width:1rem;height:1rem}.align-baseline{vertical-align:baseline !important}.align-top{vertical-align:top !important}.align-middle{vertical-align:middle !important}.align-bottom{vertical-align:bottom !important}.align-text-bottom{vertical-align:text-bottom !important}.align-text-top{vertical-align:text-top !important}.bg-primary{background-color:#007bff !important}a.bg-primary:hover,a.bg-primary:focus,button.bg-primary:hover,button.bg-primary:focus{background-color:#0062cc !important}.bg-secondary{background-color:#6c757d !important}a.bg-secondary:hover,a.bg-secondary:focus,button.bg-secondary:hover,button.bg-secondary:focus{background-color:#545b62 !important}.bg-success{background-color:#28a745 !important}a.bg-success:hover,a.bg-success:focus,button.bg-success:hover,button.bg-success:focus{background-color:#1e7e34 !important}.bg-info{background-color:#17a2b8 !important}a.bg-info:hover,a.bg-info:focus,button.bg-info:hover,button.bg-info:focus{background-color:#117a8b !important}.bg-warning{background-color:#ffc107 !important}a.bg-warning:hover,a.bg-warning:focus,button.bg-warning:hover,button.bg-warning:focus{background-color:#d39e00 !important}.bg-danger{background-color:#dc3545 !important}a.bg-danger:hover,a.bg-danger:focus,button.bg-danger:hover,button.bg-danger:focus{background-color:#bd2130 !important}.bg-light{background-color:#f8f9fa !important}a.bg-light:hover,a.bg-light:focus,button.bg-light:hover,button.bg-light:focus{background-color:#dae0e5 !important}.bg-dark{background-color:#343a40 !important}a.bg-dark:hover,a.bg-dark:focus,button.bg-dark:hover,button.bg-dark:focus{background-color:#1d2124 !important}.bg-white{background-color:#fff 
!important}.bg-transparent{background-color:transparent !important}.border{border:1px solid #dee2e6 !important}.border-top{border-top:1px solid #dee2e6 !important}.border-right{border-right:1px solid #dee2e6 !important}.border-bottom{border-bottom:1px solid #dee2e6 !important}.border-left{border-left:1px solid #dee2e6 !important}.border-0{border:0 !important}.border-top-0{border-top:0 !important}.border-right-0{border-right:0 !important}.border-bottom-0{border-bottom:0 !important}.border-left-0{border-left:0 !important}.border-primary{border-color:#007bff !important}.border-secondary{border-color:#6c757d !important}.border-success{border-color:#28a745 !important}.border-info{border-color:#17a2b8 !important}.border-warning{border-color:#ffc107 !important}.border-danger{border-color:#dc3545 !important}.border-light{border-color:#f8f9fa !important}.border-dark{border-color:#343a40 !important}.border-white{border-color:#fff !important}.rounded-sm{border-radius:.2rem !important}.rounded{border-radius:.25rem !important}.rounded-top{border-top-left-radius:.25rem !important;border-top-right-radius:.25rem !important}.rounded-right{border-top-right-radius:.25rem !important;border-bottom-right-radius:.25rem !important}.rounded-bottom{border-bottom-right-radius:.25rem !important;border-bottom-left-radius:.25rem !important}.rounded-left{border-top-left-radius:.25rem !important;border-bottom-left-radius:.25rem !important}.rounded-lg{border-radius:.3rem !important}.rounded-circle{border-radius:50% !important}.rounded-pill{border-radius:50rem !important}.rounded-0{border-radius:0 !important}.clearfix::after{display:block;clear:both;content:""}.d-none{display:none !important}.d-inline{display:inline !important}.d-inline-block{display:inline-block !important}.d-block{display:block !important}.d-table{display:table !important}.d-table-row{display:table-row !important}.d-table-cell{display:table-cell !important}.d-flex{display:flex !important}.d-inline-flex{display:inline-flex !important}@media (min-width: 576px){.d-sm-none{display:none !important}.d-sm-inline{display:inline !important}.d-sm-inline-block{display:inline-block !important}.d-sm-block{display:block !important}.d-sm-table{display:table !important}.d-sm-table-row{display:table-row !important}.d-sm-table-cell{display:table-cell !important}.d-sm-flex{display:flex !important}.d-sm-inline-flex{display:inline-flex !important}}@media (min-width: 768px){.d-md-none{display:none !important}.d-md-inline{display:inline !important}.d-md-inline-block{display:inline-block !important}.d-md-block{display:block !important}.d-md-table{display:table !important}.d-md-table-row{display:table-row !important}.d-md-table-cell{display:table-cell !important}.d-md-flex{display:flex !important}.d-md-inline-flex{display:inline-flex !important}}@media (min-width: 992px){.d-lg-none{display:none !important}.d-lg-inline{display:inline !important}.d-lg-inline-block{display:inline-block !important}.d-lg-block{display:block !important}.d-lg-table{display:table !important}.d-lg-table-row{display:table-row !important}.d-lg-table-cell{display:table-cell !important}.d-lg-flex{display:flex !important}.d-lg-inline-flex{display:inline-flex !important}}@media (min-width: 1200px){.d-xl-none{display:none !important}.d-xl-inline{display:inline !important}.d-xl-inline-block{display:inline-block !important}.d-xl-block{display:block !important}.d-xl-table{display:table !important}.d-xl-table-row{display:table-row !important}.d-xl-table-cell{display:table-cell !important}.d-xl-flex{display:flex 
!important}.d-xl-inline-flex{display:inline-flex !important}}@media print{.d-print-none{display:none !important}.d-print-inline{display:inline !important}.d-print-inline-block{display:inline-block !important}.d-print-block{display:block !important}.d-print-table{display:table !important}.d-print-table-row{display:table-row !important}.d-print-table-cell{display:table-cell !important}.d-print-flex{display:flex !important}.d-print-inline-flex{display:inline-flex !important}}.embed-responsive{position:relative;display:block;width:100%;padding:0;overflow:hidden}.embed-responsive::before{display:block;content:""}.embed-responsive .embed-responsive-item,.embed-responsive iframe,.embed-responsive embed,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-21by9::before{padding-top:42.8571428571%}.embed-responsive-16by9::before{padding-top:56.25%}.embed-responsive-4by3::before{padding-top:75%}.embed-responsive-1by1::before{padding-top:100%}.flex-row{flex-direction:row !important}.flex-column{flex-direction:column !important}.flex-row-reverse{flex-direction:row-reverse !important}.flex-column-reverse{flex-direction:column-reverse !important}.flex-wrap{flex-wrap:wrap !important}.flex-nowrap{flex-wrap:nowrap !important}.flex-wrap-reverse{flex-wrap:wrap-reverse !important}.flex-fill{flex:1 1 auto !important}.flex-grow-0{flex-grow:0 !important}.flex-grow-1{flex-grow:1 !important}.flex-shrink-0{flex-shrink:0 !important}.flex-shrink-1{flex-shrink:1 !important}.justify-content-start{justify-content:flex-start !important}.justify-content-end{justify-content:flex-end !important}.justify-content-center{justify-content:center !important}.justify-content-between{justify-content:space-between !important}.justify-content-around{justify-content:space-around !important}.align-items-start{align-items:flex-start !important}.align-items-end{align-items:flex-end !important}.align-items-center{align-items:center !important}.align-items-baseline{align-items:baseline !important}.align-items-stretch{align-items:stretch !important}.align-content-start{align-content:flex-start !important}.align-content-end{align-content:flex-end !important}.align-content-center{align-content:center !important}.align-content-between{align-content:space-between !important}.align-content-around{align-content:space-around !important}.align-content-stretch{align-content:stretch !important}.align-self-auto{align-self:auto !important}.align-self-start{align-self:flex-start !important}.align-self-end{align-self:flex-end !important}.align-self-center{align-self:center !important}.align-self-baseline{align-self:baseline !important}.align-self-stretch{align-self:stretch !important}@media (min-width: 576px){.flex-sm-row{flex-direction:row !important}.flex-sm-column{flex-direction:column !important}.flex-sm-row-reverse{flex-direction:row-reverse !important}.flex-sm-column-reverse{flex-direction:column-reverse !important}.flex-sm-wrap{flex-wrap:wrap !important}.flex-sm-nowrap{flex-wrap:nowrap !important}.flex-sm-wrap-reverse{flex-wrap:wrap-reverse !important}.flex-sm-fill{flex:1 1 auto !important}.flex-sm-grow-0{flex-grow:0 !important}.flex-sm-grow-1{flex-grow:1 !important}.flex-sm-shrink-0{flex-shrink:0 !important}.flex-sm-shrink-1{flex-shrink:1 !important}.justify-content-sm-start{justify-content:flex-start !important}.justify-content-sm-end{justify-content:flex-end !important}.justify-content-sm-center{justify-content:center 
!important}.justify-content-sm-between{justify-content:space-between !important}.justify-content-sm-around{justify-content:space-around !important}.align-items-sm-start{align-items:flex-start !important}.align-items-sm-end{align-items:flex-end !important}.align-items-sm-center{align-items:center !important}.align-items-sm-baseline{align-items:baseline !important}.align-items-sm-stretch{align-items:stretch !important}.align-content-sm-start{align-content:flex-start !important}.align-content-sm-end{align-content:flex-end !important}.align-content-sm-center{align-content:center !important}.align-content-sm-between{align-content:space-between !important}.align-content-sm-around{align-content:space-around !important}.align-content-sm-stretch{align-content:stretch !important}.align-self-sm-auto{align-self:auto !important}.align-self-sm-start{align-self:flex-start !important}.align-self-sm-end{align-self:flex-end !important}.align-self-sm-center{align-self:center !important}.align-self-sm-baseline{align-self:baseline !important}.align-self-sm-stretch{align-self:stretch !important}}@media (min-width: 768px){.flex-md-row{flex-direction:row !important}.flex-md-column{flex-direction:column !important}.flex-md-row-reverse{flex-direction:row-reverse !important}.flex-md-column-reverse{flex-direction:column-reverse !important}.flex-md-wrap{flex-wrap:wrap !important}.flex-md-nowrap{flex-wrap:nowrap !important}.flex-md-wrap-reverse{flex-wrap:wrap-reverse !important}.flex-md-fill{flex:1 1 auto !important}.flex-md-grow-0{flex-grow:0 !important}.flex-md-grow-1{flex-grow:1 !important}.flex-md-shrink-0{flex-shrink:0 !important}.flex-md-shrink-1{flex-shrink:1 !important}.justify-content-md-start{justify-content:flex-start !important}.justify-content-md-end{justify-content:flex-end !important}.justify-content-md-center{justify-content:center !important}.justify-content-md-between{justify-content:space-between !important}.justify-content-md-around{justify-content:space-around !important}.align-items-md-start{align-items:flex-start !important}.align-items-md-end{align-items:flex-end !important}.align-items-md-center{align-items:center !important}.align-items-md-baseline{align-items:baseline !important}.align-items-md-stretch{align-items:stretch !important}.align-content-md-start{align-content:flex-start !important}.align-content-md-end{align-content:flex-end !important}.align-content-md-center{align-content:center !important}.align-content-md-between{align-content:space-between !important}.align-content-md-around{align-content:space-around !important}.align-content-md-stretch{align-content:stretch !important}.align-self-md-auto{align-self:auto !important}.align-self-md-start{align-self:flex-start !important}.align-self-md-end{align-self:flex-end !important}.align-self-md-center{align-self:center !important}.align-self-md-baseline{align-self:baseline !important}.align-self-md-stretch{align-self:stretch !important}}@media (min-width: 992px){.flex-lg-row{flex-direction:row !important}.flex-lg-column{flex-direction:column !important}.flex-lg-row-reverse{flex-direction:row-reverse !important}.flex-lg-column-reverse{flex-direction:column-reverse !important}.flex-lg-wrap{flex-wrap:wrap !important}.flex-lg-nowrap{flex-wrap:nowrap !important}.flex-lg-wrap-reverse{flex-wrap:wrap-reverse !important}.flex-lg-fill{flex:1 1 auto !important}.flex-lg-grow-0{flex-grow:0 !important}.flex-lg-grow-1{flex-grow:1 !important}.flex-lg-shrink-0{flex-shrink:0 !important}.flex-lg-shrink-1{flex-shrink:1 
!important}.justify-content-lg-start{justify-content:flex-start !important}.justify-content-lg-end{justify-content:flex-end !important}.justify-content-lg-center{justify-content:center !important}.justify-content-lg-between{justify-content:space-between !important}.justify-content-lg-around{justify-content:space-around !important}.align-items-lg-start{align-items:flex-start !important}.align-items-lg-end{align-items:flex-end !important}.align-items-lg-center{align-items:center !important}.align-items-lg-baseline{align-items:baseline !important}.align-items-lg-stretch{align-items:stretch !important}.align-content-lg-start{align-content:flex-start !important}.align-content-lg-end{align-content:flex-end !important}.align-content-lg-center{align-content:center !important}.align-content-lg-between{align-content:space-between !important}.align-content-lg-around{align-content:space-around !important}.align-content-lg-stretch{align-content:stretch !important}.align-self-lg-auto{align-self:auto !important}.align-self-lg-start{align-self:flex-start !important}.align-self-lg-end{align-self:flex-end !important}.align-self-lg-center{align-self:center !important}.align-self-lg-baseline{align-self:baseline !important}.align-self-lg-stretch{align-self:stretch !important}}@media (min-width: 1200px){.flex-xl-row{flex-direction:row !important}.flex-xl-column{flex-direction:column !important}.flex-xl-row-reverse{flex-direction:row-reverse !important}.flex-xl-column-reverse{flex-direction:column-reverse !important}.flex-xl-wrap{flex-wrap:wrap !important}.flex-xl-nowrap{flex-wrap:nowrap !important}.flex-xl-wrap-reverse{flex-wrap:wrap-reverse !important}.flex-xl-fill{flex:1 1 auto !important}.flex-xl-grow-0{flex-grow:0 !important}.flex-xl-grow-1{flex-grow:1 !important}.flex-xl-shrink-0{flex-shrink:0 !important}.flex-xl-shrink-1{flex-shrink:1 !important}.justify-content-xl-start{justify-content:flex-start !important}.justify-content-xl-end{justify-content:flex-end !important}.justify-content-xl-center{justify-content:center !important}.justify-content-xl-between{justify-content:space-between !important}.justify-content-xl-around{justify-content:space-around !important}.align-items-xl-start{align-items:flex-start !important}.align-items-xl-end{align-items:flex-end !important}.align-items-xl-center{align-items:center !important}.align-items-xl-baseline{align-items:baseline !important}.align-items-xl-stretch{align-items:stretch !important}.align-content-xl-start{align-content:flex-start !important}.align-content-xl-end{align-content:flex-end !important}.align-content-xl-center{align-content:center !important}.align-content-xl-between{align-content:space-between !important}.align-content-xl-around{align-content:space-around !important}.align-content-xl-stretch{align-content:stretch !important}.align-self-xl-auto{align-self:auto !important}.align-self-xl-start{align-self:flex-start !important}.align-self-xl-end{align-self:flex-end !important}.align-self-xl-center{align-self:center !important}.align-self-xl-baseline{align-self:baseline !important}.align-self-xl-stretch{align-self:stretch !important}}.float-left{float:left !important}.float-right{float:right !important}.float-none{float:none !important}@media (min-width: 576px){.float-sm-left{float:left !important}.float-sm-right{float:right !important}.float-sm-none{float:none !important}}@media (min-width: 768px){.float-md-left{float:left !important}.float-md-right{float:right !important}.float-md-none{float:none !important}}@media (min-width: 
992px){.float-lg-left{float:left !important}.float-lg-right{float:right !important}.float-lg-none{float:none !important}}@media (min-width: 1200px){.float-xl-left{float:left !important}.float-xl-right{float:right !important}.float-xl-none{float:none !important}}.overflow-auto{overflow:auto !important}.overflow-hidden{overflow:hidden !important}.position-static{position:static !important}.position-relative{position:relative !important}.position-absolute{position:absolute !important}.position-fixed{position:fixed !important}.position-sticky{position:-webkit-sticky !important;position:sticky !important}.fixed-top{position:fixed;top:0;right:0;left:0;z-index:1030}.fixed-bottom{position:fixed;right:0;bottom:0;left:0;z-index:1030}@supports ((position: -webkit-sticky) or (position: sticky)){.sticky-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}.sr-only{position:absolute;width:1px;height:1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);white-space:nowrap;border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;overflow:visible;clip:auto;white-space:normal}.shadow-sm{box-shadow:0 0.125rem 0.25rem rgba(0,0,0,0.075) !important}.shadow{box-shadow:0 0.5rem 1rem rgba(0,0,0,0.15) !important}.shadow-lg{box-shadow:0 1rem 3rem rgba(0,0,0,0.175) !important}.shadow-none{box-shadow:none !important}.w-25{width:25% !important}.w-50{width:50% !important}.w-75{width:75% !important}.w-100{width:100% !important}.w-auto{width:auto !important}.h-25{height:25% !important}.h-50{height:50% !important}.h-75{height:75% !important}.h-100{height:100% !important}.h-auto{height:auto !important}.mw-100{max-width:100% !important}.mh-100{max-height:100% !important}.min-vw-100{min-width:100vw !important}.min-vh-100{min-height:100vh !important}.vw-100{width:100vw !important}.vh-100{height:100vh !important}.stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;pointer-events:auto;content:"";background-color:transparent}.m-0{margin:0 !important}.mt-0,.my-0{margin-top:0 !important}.mr-0,.mx-0{margin-right:0 !important}.mb-0,.my-0{margin-bottom:0 !important}.ml-0,.mx-0{margin-left:0 !important}.m-1{margin:.25rem !important}.mt-1,.my-1{margin-top:.25rem !important}.mr-1,.mx-1{margin-right:.25rem !important}.mb-1,.my-1{margin-bottom:.25rem !important}.ml-1,.mx-1{margin-left:.25rem !important}.m-2{margin:.5rem !important}.mt-2,.my-2{margin-top:.5rem !important}.mr-2,.mx-2{margin-right:.5rem !important}.mb-2,.my-2{margin-bottom:.5rem !important}.ml-2,.mx-2{margin-left:.5rem !important}.m-3{margin:1rem !important}.mt-3,.my-3{margin-top:1rem !important}.mr-3,.mx-3{margin-right:1rem !important}.mb-3,.my-3{margin-bottom:1rem !important}.ml-3,.mx-3{margin-left:1rem !important}.m-4{margin:1.5rem !important}.mt-4,.my-4{margin-top:1.5rem !important}.mr-4,.mx-4{margin-right:1.5rem !important}.mb-4,.my-4{margin-bottom:1.5rem !important}.ml-4,.mx-4{margin-left:1.5rem !important}.m-5{margin:3rem !important}.mt-5,.my-5{margin-top:3rem !important}.mr-5,.mx-5{margin-right:3rem !important}.mb-5,.my-5{margin-bottom:3rem !important}.ml-5,.mx-5{margin-left:3rem !important}.p-0{padding:0 !important}.pt-0,.py-0{padding-top:0 !important}.pr-0,.px-0{padding-right:0 !important}.pb-0,.py-0{padding-bottom:0 !important}.pl-0,.px-0{padding-left:0 !important}.p-1{padding:.25rem !important}.pt-1,.py-1{padding-top:.25rem !important}.pr-1,.px-1{padding-right:.25rem !important}.pb-1,.py-1{padding-bottom:.25rem !important}.pl-1,.px-1{padding-left:.25rem !important}.p-2{padding:.5rem 
!important}.pt-2,.py-2{padding-top:.5rem !important}.pr-2,.px-2{padding-right:.5rem !important}.pb-2,.py-2{padding-bottom:.5rem !important}.pl-2,.px-2{padding-left:.5rem !important}.p-3{padding:1rem !important}.pt-3,.py-3{padding-top:1rem !important}.pr-3,.px-3{padding-right:1rem !important}.pb-3,.py-3{padding-bottom:1rem !important}.pl-3,.px-3{padding-left:1rem !important}.p-4{padding:1.5rem !important}.pt-4,.py-4{padding-top:1.5rem !important}.pr-4,.px-4{padding-right:1.5rem !important}.pb-4,.py-4{padding-bottom:1.5rem !important}.pl-4,.px-4{padding-left:1.5rem !important}.p-5{padding:3rem !important}.pt-5,.py-5{padding-top:3rem !important}.pr-5,.px-5{padding-right:3rem !important}.pb-5,.py-5{padding-bottom:3rem !important}.pl-5,.px-5{padding-left:3rem !important}.m-n1{margin:-.25rem !important}.mt-n1,.my-n1{margin-top:-.25rem !important}.mr-n1,.mx-n1{margin-right:-.25rem !important}.mb-n1,.my-n1{margin-bottom:-.25rem !important}.ml-n1,.mx-n1{margin-left:-.25rem !important}.m-n2{margin:-.5rem !important}.mt-n2,.my-n2{margin-top:-.5rem !important}.mr-n2,.mx-n2{margin-right:-.5rem !important}.mb-n2,.my-n2{margin-bottom:-.5rem !important}.ml-n2,.mx-n2{margin-left:-.5rem !important}.m-n3{margin:-1rem !important}.mt-n3,.my-n3{margin-top:-1rem !important}.mr-n3,.mx-n3{margin-right:-1rem !important}.mb-n3,.my-n3{margin-bottom:-1rem !important}.ml-n3,.mx-n3{margin-left:-1rem !important}.m-n4{margin:-1.5rem !important}.mt-n4,.my-n4{margin-top:-1.5rem !important}.mr-n4,.mx-n4{margin-right:-1.5rem !important}.mb-n4,.my-n4{margin-bottom:-1.5rem !important}.ml-n4,.mx-n4{margin-left:-1.5rem !important}.m-n5{margin:-3rem !important}.mt-n5,.my-n5{margin-top:-3rem !important}.mr-n5,.mx-n5{margin-right:-3rem !important}.mb-n5,.my-n5{margin-bottom:-3rem !important}.ml-n5,.mx-n5{margin-left:-3rem !important}.m-auto{margin:auto !important}.mt-auto,.my-auto{margin-top:auto !important}.mr-auto,.mx-auto{margin-right:auto !important}.mb-auto,.my-auto{margin-bottom:auto !important}.ml-auto,.mx-auto{margin-left:auto !important}@media (min-width: 576px){.m-sm-0{margin:0 !important}.mt-sm-0,.my-sm-0{margin-top:0 !important}.mr-sm-0,.mx-sm-0{margin-right:0 !important}.mb-sm-0,.my-sm-0{margin-bottom:0 !important}.ml-sm-0,.mx-sm-0{margin-left:0 !important}.m-sm-1{margin:.25rem !important}.mt-sm-1,.my-sm-1{margin-top:.25rem !important}.mr-sm-1,.mx-sm-1{margin-right:.25rem !important}.mb-sm-1,.my-sm-1{margin-bottom:.25rem !important}.ml-sm-1,.mx-sm-1{margin-left:.25rem !important}.m-sm-2{margin:.5rem !important}.mt-sm-2,.my-sm-2{margin-top:.5rem !important}.mr-sm-2,.mx-sm-2{margin-right:.5rem !important}.mb-sm-2,.my-sm-2{margin-bottom:.5rem !important}.ml-sm-2,.mx-sm-2{margin-left:.5rem !important}.m-sm-3{margin:1rem !important}.mt-sm-3,.my-sm-3{margin-top:1rem !important}.mr-sm-3,.mx-sm-3{margin-right:1rem !important}.mb-sm-3,.my-sm-3{margin-bottom:1rem !important}.ml-sm-3,.mx-sm-3{margin-left:1rem !important}.m-sm-4{margin:1.5rem !important}.mt-sm-4,.my-sm-4{margin-top:1.5rem !important}.mr-sm-4,.mx-sm-4{margin-right:1.5rem !important}.mb-sm-4,.my-sm-4{margin-bottom:1.5rem !important}.ml-sm-4,.mx-sm-4{margin-left:1.5rem !important}.m-sm-5{margin:3rem !important}.mt-sm-5,.my-sm-5{margin-top:3rem !important}.mr-sm-5,.mx-sm-5{margin-right:3rem !important}.mb-sm-5,.my-sm-5{margin-bottom:3rem !important}.ml-sm-5,.mx-sm-5{margin-left:3rem !important}.p-sm-0{padding:0 !important}.pt-sm-0,.py-sm-0{padding-top:0 !important}.pr-sm-0,.px-sm-0{padding-right:0 !important}.pb-sm-0,.py-sm-0{padding-bottom:0 
!important}.pl-sm-0,.px-sm-0{padding-left:0 !important}.p-sm-1{padding:.25rem !important}.pt-sm-1,.py-sm-1{padding-top:.25rem !important}.pr-sm-1,.px-sm-1{padding-right:.25rem !important}.pb-sm-1,.py-sm-1{padding-bottom:.25rem !important}.pl-sm-1,.px-sm-1{padding-left:.25rem !important}.p-sm-2{padding:.5rem !important}.pt-sm-2,.py-sm-2{padding-top:.5rem !important}.pr-sm-2,.px-sm-2{padding-right:.5rem !important}.pb-sm-2,.py-sm-2{padding-bottom:.5rem !important}.pl-sm-2,.px-sm-2{padding-left:.5rem !important}.p-sm-3{padding:1rem !important}.pt-sm-3,.py-sm-3{padding-top:1rem !important}.pr-sm-3,.px-sm-3{padding-right:1rem !important}.pb-sm-3,.py-sm-3{padding-bottom:1rem !important}.pl-sm-3,.px-sm-3{padding-left:1rem !important}.p-sm-4{padding:1.5rem !important}.pt-sm-4,.py-sm-4{padding-top:1.5rem !important}.pr-sm-4,.px-sm-4{padding-right:1.5rem !important}.pb-sm-4,.py-sm-4{padding-bottom:1.5rem !important}.pl-sm-4,.px-sm-4{padding-left:1.5rem !important}.p-sm-5{padding:3rem !important}.pt-sm-5,.py-sm-5{padding-top:3rem !important}.pr-sm-5,.px-sm-5{padding-right:3rem !important}.pb-sm-5,.py-sm-5{padding-bottom:3rem !important}.pl-sm-5,.px-sm-5{padding-left:3rem !important}.m-sm-n1{margin:-.25rem !important}.mt-sm-n1,.my-sm-n1{margin-top:-.25rem !important}.mr-sm-n1,.mx-sm-n1{margin-right:-.25rem !important}.mb-sm-n1,.my-sm-n1{margin-bottom:-.25rem !important}.ml-sm-n1,.mx-sm-n1{margin-left:-.25rem !important}.m-sm-n2{margin:-.5rem !important}.mt-sm-n2,.my-sm-n2{margin-top:-.5rem !important}.mr-sm-n2,.mx-sm-n2{margin-right:-.5rem !important}.mb-sm-n2,.my-sm-n2{margin-bottom:-.5rem !important}.ml-sm-n2,.mx-sm-n2{margin-left:-.5rem !important}.m-sm-n3{margin:-1rem !important}.mt-sm-n3,.my-sm-n3{margin-top:-1rem !important}.mr-sm-n3,.mx-sm-n3{margin-right:-1rem !important}.mb-sm-n3,.my-sm-n3{margin-bottom:-1rem !important}.ml-sm-n3,.mx-sm-n3{margin-left:-1rem !important}.m-sm-n4{margin:-1.5rem !important}.mt-sm-n4,.my-sm-n4{margin-top:-1.5rem !important}.mr-sm-n4,.mx-sm-n4{margin-right:-1.5rem !important}.mb-sm-n4,.my-sm-n4{margin-bottom:-1.5rem !important}.ml-sm-n4,.mx-sm-n4{margin-left:-1.5rem !important}.m-sm-n5{margin:-3rem !important}.mt-sm-n5,.my-sm-n5{margin-top:-3rem !important}.mr-sm-n5,.mx-sm-n5{margin-right:-3rem !important}.mb-sm-n5,.my-sm-n5{margin-bottom:-3rem !important}.ml-sm-n5,.mx-sm-n5{margin-left:-3rem !important}.m-sm-auto{margin:auto !important}.mt-sm-auto,.my-sm-auto{margin-top:auto !important}.mr-sm-auto,.mx-sm-auto{margin-right:auto !important}.mb-sm-auto,.my-sm-auto{margin-bottom:auto !important}.ml-sm-auto,.mx-sm-auto{margin-left:auto !important}}@media (min-width: 768px){.m-md-0{margin:0 !important}.mt-md-0,.my-md-0{margin-top:0 !important}.mr-md-0,.mx-md-0{margin-right:0 !important}.mb-md-0,.my-md-0{margin-bottom:0 !important}.ml-md-0,.mx-md-0{margin-left:0 !important}.m-md-1{margin:.25rem !important}.mt-md-1,.my-md-1{margin-top:.25rem !important}.mr-md-1,.mx-md-1{margin-right:.25rem !important}.mb-md-1,.my-md-1{margin-bottom:.25rem !important}.ml-md-1,.mx-md-1{margin-left:.25rem !important}.m-md-2{margin:.5rem !important}.mt-md-2,.my-md-2{margin-top:.5rem !important}.mr-md-2,.mx-md-2{margin-right:.5rem !important}.mb-md-2,.my-md-2{margin-bottom:.5rem !important}.ml-md-2,.mx-md-2{margin-left:.5rem !important}.m-md-3{margin:1rem !important}.mt-md-3,.my-md-3{margin-top:1rem !important}.mr-md-3,.mx-md-3{margin-right:1rem !important}.mb-md-3,.my-md-3{margin-bottom:1rem !important}.ml-md-3,.mx-md-3{margin-left:1rem !important}.m-md-4{margin:1.5rem 
!important}.mt-md-4,.my-md-4{margin-top:1.5rem !important}.mr-md-4,.mx-md-4{margin-right:1.5rem !important}.mb-md-4,.my-md-4{margin-bottom:1.5rem !important}.ml-md-4,.mx-md-4{margin-left:1.5rem !important}.m-md-5{margin:3rem !important}.mt-md-5,.my-md-5{margin-top:3rem !important}.mr-md-5,.mx-md-5{margin-right:3rem !important}.mb-md-5,.my-md-5{margin-bottom:3rem !important}.ml-md-5,.mx-md-5{margin-left:3rem !important}.p-md-0{padding:0 !important}.pt-md-0,.py-md-0{padding-top:0 !important}.pr-md-0,.px-md-0{padding-right:0 !important}.pb-md-0,.py-md-0{padding-bottom:0 !important}.pl-md-0,.px-md-0{padding-left:0 !important}.p-md-1{padding:.25rem !important}.pt-md-1,.py-md-1{padding-top:.25rem !important}.pr-md-1,.px-md-1{padding-right:.25rem !important}.pb-md-1,.py-md-1{padding-bottom:.25rem !important}.pl-md-1,.px-md-1{padding-left:.25rem !important}.p-md-2{padding:.5rem !important}.pt-md-2,.py-md-2{padding-top:.5rem !important}.pr-md-2,.px-md-2{padding-right:.5rem !important}.pb-md-2,.py-md-2{padding-bottom:.5rem !important}.pl-md-2,.px-md-2{padding-left:.5rem !important}.p-md-3{padding:1rem !important}.pt-md-3,.py-md-3{padding-top:1rem !important}.pr-md-3,.px-md-3{padding-right:1rem !important}.pb-md-3,.py-md-3{padding-bottom:1rem !important}.pl-md-3,.px-md-3{padding-left:1rem !important}.p-md-4{padding:1.5rem !important}.pt-md-4,.py-md-4{padding-top:1.5rem !important}.pr-md-4,.px-md-4{padding-right:1.5rem !important}.pb-md-4,.py-md-4{padding-bottom:1.5rem !important}.pl-md-4,.px-md-4{padding-left:1.5rem !important}.p-md-5{padding:3rem !important}.pt-md-5,.py-md-5{padding-top:3rem !important}.pr-md-5,.px-md-5{padding-right:3rem !important}.pb-md-5,.py-md-5{padding-bottom:3rem !important}.pl-md-5,.px-md-5{padding-left:3rem !important}.m-md-n1{margin:-.25rem !important}.mt-md-n1,.my-md-n1{margin-top:-.25rem !important}.mr-md-n1,.mx-md-n1{margin-right:-.25rem !important}.mb-md-n1,.my-md-n1{margin-bottom:-.25rem !important}.ml-md-n1,.mx-md-n1{margin-left:-.25rem !important}.m-md-n2{margin:-.5rem !important}.mt-md-n2,.my-md-n2{margin-top:-.5rem !important}.mr-md-n2,.mx-md-n2{margin-right:-.5rem !important}.mb-md-n2,.my-md-n2{margin-bottom:-.5rem !important}.ml-md-n2,.mx-md-n2{margin-left:-.5rem !important}.m-md-n3{margin:-1rem !important}.mt-md-n3,.my-md-n3{margin-top:-1rem !important}.mr-md-n3,.mx-md-n3{margin-right:-1rem !important}.mb-md-n3,.my-md-n3{margin-bottom:-1rem !important}.ml-md-n3,.mx-md-n3{margin-left:-1rem !important}.m-md-n4{margin:-1.5rem !important}.mt-md-n4,.my-md-n4{margin-top:-1.5rem !important}.mr-md-n4,.mx-md-n4{margin-right:-1.5rem !important}.mb-md-n4,.my-md-n4{margin-bottom:-1.5rem !important}.ml-md-n4,.mx-md-n4{margin-left:-1.5rem !important}.m-md-n5{margin:-3rem !important}.mt-md-n5,.my-md-n5{margin-top:-3rem !important}.mr-md-n5,.mx-md-n5{margin-right:-3rem !important}.mb-md-n5,.my-md-n5{margin-bottom:-3rem !important}.ml-md-n5,.mx-md-n5{margin-left:-3rem !important}.m-md-auto{margin:auto !important}.mt-md-auto,.my-md-auto{margin-top:auto !important}.mr-md-auto,.mx-md-auto{margin-right:auto !important}.mb-md-auto,.my-md-auto{margin-bottom:auto !important}.ml-md-auto,.mx-md-auto{margin-left:auto !important}}@media (min-width: 992px){.m-lg-0{margin:0 !important}.mt-lg-0,.my-lg-0{margin-top:0 !important}.mr-lg-0,.mx-lg-0{margin-right:0 !important}.mb-lg-0,.my-lg-0{margin-bottom:0 !important}.ml-lg-0,.mx-lg-0{margin-left:0 !important}.m-lg-1{margin:.25rem !important}.mt-lg-1,.my-lg-1{margin-top:.25rem !important}.mr-lg-1,.mx-lg-1{margin-right:.25rem 
!important}.mb-lg-1,.my-lg-1{margin-bottom:.25rem !important}.ml-lg-1,.mx-lg-1{margin-left:.25rem !important}.m-lg-2{margin:.5rem !important}.mt-lg-2,.my-lg-2{margin-top:.5rem !important}.mr-lg-2,.mx-lg-2{margin-right:.5rem !important}.mb-lg-2,.my-lg-2{margin-bottom:.5rem !important}.ml-lg-2,.mx-lg-2{margin-left:.5rem !important}.m-lg-3{margin:1rem !important}.mt-lg-3,.my-lg-3{margin-top:1rem !important}.mr-lg-3,.mx-lg-3{margin-right:1rem !important}.mb-lg-3,.my-lg-3{margin-bottom:1rem !important}.ml-lg-3,.mx-lg-3{margin-left:1rem !important}.m-lg-4{margin:1.5rem !important}.mt-lg-4,.my-lg-4{margin-top:1.5rem !important}.mr-lg-4,.mx-lg-4{margin-right:1.5rem !important}.mb-lg-4,.my-lg-4{margin-bottom:1.5rem !important}.ml-lg-4,.mx-lg-4{margin-left:1.5rem !important}.m-lg-5{margin:3rem !important}.mt-lg-5,.my-lg-5{margin-top:3rem !important}.mr-lg-5,.mx-lg-5{margin-right:3rem !important}.mb-lg-5,.my-lg-5{margin-bottom:3rem !important}.ml-lg-5,.mx-lg-5{margin-left:3rem !important}.p-lg-0{padding:0 !important}.pt-lg-0,.py-lg-0{padding-top:0 !important}.pr-lg-0,.px-lg-0{padding-right:0 !important}.pb-lg-0,.py-lg-0{padding-bottom:0 !important}.pl-lg-0,.px-lg-0{padding-left:0 !important}.p-lg-1{padding:.25rem !important}.pt-lg-1,.py-lg-1{padding-top:.25rem !important}.pr-lg-1,.px-lg-1{padding-right:.25rem !important}.pb-lg-1,.py-lg-1{padding-bottom:.25rem !important}.pl-lg-1,.px-lg-1{padding-left:.25rem !important}.p-lg-2{padding:.5rem !important}.pt-lg-2,.py-lg-2{padding-top:.5rem !important}.pr-lg-2,.px-lg-2{padding-right:.5rem !important}.pb-lg-2,.py-lg-2{padding-bottom:.5rem !important}.pl-lg-2,.px-lg-2{padding-left:.5rem !important}.p-lg-3{padding:1rem !important}.pt-lg-3,.py-lg-3{padding-top:1rem !important}.pr-lg-3,.px-lg-3{padding-right:1rem !important}.pb-lg-3,.py-lg-3{padding-bottom:1rem !important}.pl-lg-3,.px-lg-3{padding-left:1rem !important}.p-lg-4{padding:1.5rem !important}.pt-lg-4,.py-lg-4{padding-top:1.5rem !important}.pr-lg-4,.px-lg-4{padding-right:1.5rem !important}.pb-lg-4,.py-lg-4{padding-bottom:1.5rem !important}.pl-lg-4,.px-lg-4{padding-left:1.5rem !important}.p-lg-5{padding:3rem !important}.pt-lg-5,.py-lg-5{padding-top:3rem !important}.pr-lg-5,.px-lg-5{padding-right:3rem !important}.pb-lg-5,.py-lg-5{padding-bottom:3rem !important}.pl-lg-5,.px-lg-5{padding-left:3rem !important}.m-lg-n1{margin:-.25rem !important}.mt-lg-n1,.my-lg-n1{margin-top:-.25rem !important}.mr-lg-n1,.mx-lg-n1{margin-right:-.25rem !important}.mb-lg-n1,.my-lg-n1{margin-bottom:-.25rem !important}.ml-lg-n1,.mx-lg-n1{margin-left:-.25rem !important}.m-lg-n2{margin:-.5rem !important}.mt-lg-n2,.my-lg-n2{margin-top:-.5rem !important}.mr-lg-n2,.mx-lg-n2{margin-right:-.5rem !important}.mb-lg-n2,.my-lg-n2{margin-bottom:-.5rem !important}.ml-lg-n2,.mx-lg-n2{margin-left:-.5rem !important}.m-lg-n3{margin:-1rem !important}.mt-lg-n3,.my-lg-n3{margin-top:-1rem !important}.mr-lg-n3,.mx-lg-n3{margin-right:-1rem !important}.mb-lg-n3,.my-lg-n3{margin-bottom:-1rem !important}.ml-lg-n3,.mx-lg-n3{margin-left:-1rem !important}.m-lg-n4{margin:-1.5rem !important}.mt-lg-n4,.my-lg-n4{margin-top:-1.5rem !important}.mr-lg-n4,.mx-lg-n4{margin-right:-1.5rem !important}.mb-lg-n4,.my-lg-n4{margin-bottom:-1.5rem !important}.ml-lg-n4,.mx-lg-n4{margin-left:-1.5rem !important}.m-lg-n5{margin:-3rem !important}.mt-lg-n5,.my-lg-n5{margin-top:-3rem !important}.mr-lg-n5,.mx-lg-n5{margin-right:-3rem !important}.mb-lg-n5,.my-lg-n5{margin-bottom:-3rem !important}.ml-lg-n5,.mx-lg-n5{margin-left:-3rem !important}.m-lg-auto{margin:auto 
!important}.mt-lg-auto,.my-lg-auto{margin-top:auto !important}.mr-lg-auto,.mx-lg-auto{margin-right:auto !important}.mb-lg-auto,.my-lg-auto{margin-bottom:auto !important}.ml-lg-auto,.mx-lg-auto{margin-left:auto !important}}@media (min-width: 1200px){.m-xl-0{margin:0 !important}.mt-xl-0,.my-xl-0{margin-top:0 !important}.mr-xl-0,.mx-xl-0{margin-right:0 !important}.mb-xl-0,.my-xl-0{margin-bottom:0 !important}.ml-xl-0,.mx-xl-0{margin-left:0 !important}.m-xl-1{margin:.25rem !important}.mt-xl-1,.my-xl-1{margin-top:.25rem !important}.mr-xl-1,.mx-xl-1{margin-right:.25rem !important}.mb-xl-1,.my-xl-1{margin-bottom:.25rem !important}.ml-xl-1,.mx-xl-1{margin-left:.25rem !important}.m-xl-2{margin:.5rem !important}.mt-xl-2,.my-xl-2{margin-top:.5rem !important}.mr-xl-2,.mx-xl-2{margin-right:.5rem !important}.mb-xl-2,.my-xl-2{margin-bottom:.5rem !important}.ml-xl-2,.mx-xl-2{margin-left:.5rem !important}.m-xl-3{margin:1rem !important}.mt-xl-3,.my-xl-3{margin-top:1rem !important}.mr-xl-3,.mx-xl-3{margin-right:1rem !important}.mb-xl-3,.my-xl-3{margin-bottom:1rem !important}.ml-xl-3,.mx-xl-3{margin-left:1rem !important}.m-xl-4{margin:1.5rem !important}.mt-xl-4,.my-xl-4{margin-top:1.5rem !important}.mr-xl-4,.mx-xl-4{margin-right:1.5rem !important}.mb-xl-4,.my-xl-4{margin-bottom:1.5rem !important}.ml-xl-4,.mx-xl-4{margin-left:1.5rem !important}.m-xl-5{margin:3rem !important}.mt-xl-5,.my-xl-5{margin-top:3rem !important}.mr-xl-5,.mx-xl-5{margin-right:3rem !important}.mb-xl-5,.my-xl-5{margin-bottom:3rem !important}.ml-xl-5,.mx-xl-5{margin-left:3rem !important}.p-xl-0{padding:0 !important}.pt-xl-0,.py-xl-0{padding-top:0 !important}.pr-xl-0,.px-xl-0{padding-right:0 !important}.pb-xl-0,.py-xl-0{padding-bottom:0 !important}.pl-xl-0,.px-xl-0{padding-left:0 !important}.p-xl-1{padding:.25rem !important}.pt-xl-1,.py-xl-1{padding-top:.25rem !important}.pr-xl-1,.px-xl-1{padding-right:.25rem !important}.pb-xl-1,.py-xl-1{padding-bottom:.25rem !important}.pl-xl-1,.px-xl-1{padding-left:.25rem !important}.p-xl-2{padding:.5rem !important}.pt-xl-2,.py-xl-2{padding-top:.5rem !important}.pr-xl-2,.px-xl-2{padding-right:.5rem !important}.pb-xl-2,.py-xl-2{padding-bottom:.5rem !important}.pl-xl-2,.px-xl-2{padding-left:.5rem !important}.p-xl-3{padding:1rem !important}.pt-xl-3,.py-xl-3{padding-top:1rem !important}.pr-xl-3,.px-xl-3{padding-right:1rem !important}.pb-xl-3,.py-xl-3{padding-bottom:1rem !important}.pl-xl-3,.px-xl-3{padding-left:1rem !important}.p-xl-4{padding:1.5rem !important}.pt-xl-4,.py-xl-4{padding-top:1.5rem !important}.pr-xl-4,.px-xl-4{padding-right:1.5rem !important}.pb-xl-4,.py-xl-4{padding-bottom:1.5rem !important}.pl-xl-4,.px-xl-4{padding-left:1.5rem !important}.p-xl-5{padding:3rem !important}.pt-xl-5,.py-xl-5{padding-top:3rem !important}.pr-xl-5,.px-xl-5{padding-right:3rem !important}.pb-xl-5,.py-xl-5{padding-bottom:3rem !important}.pl-xl-5,.px-xl-5{padding-left:3rem !important}.m-xl-n1{margin:-.25rem !important}.mt-xl-n1,.my-xl-n1{margin-top:-.25rem !important}.mr-xl-n1,.mx-xl-n1{margin-right:-.25rem !important}.mb-xl-n1,.my-xl-n1{margin-bottom:-.25rem !important}.ml-xl-n1,.mx-xl-n1{margin-left:-.25rem !important}.m-xl-n2{margin:-.5rem !important}.mt-xl-n2,.my-xl-n2{margin-top:-.5rem !important}.mr-xl-n2,.mx-xl-n2{margin-right:-.5rem !important}.mb-xl-n2,.my-xl-n2{margin-bottom:-.5rem !important}.ml-xl-n2,.mx-xl-n2{margin-left:-.5rem !important}.m-xl-n3{margin:-1rem !important}.mt-xl-n3,.my-xl-n3{margin-top:-1rem !important}.mr-xl-n3,.mx-xl-n3{margin-right:-1rem !important}.mb-xl-n3,.my-xl-n3{margin-bottom:-1rem 
!important}.ml-xl-n3,.mx-xl-n3{margin-left:-1rem !important}.m-xl-n4{margin:-1.5rem !important}.mt-xl-n4,.my-xl-n4{margin-top:-1.5rem !important}.mr-xl-n4,.mx-xl-n4{margin-right:-1.5rem !important}.mb-xl-n4,.my-xl-n4{margin-bottom:-1.5rem !important}.ml-xl-n4,.mx-xl-n4{margin-left:-1.5rem !important}.m-xl-n5{margin:-3rem !important}.mt-xl-n5,.my-xl-n5{margin-top:-3rem !important}.mr-xl-n5,.mx-xl-n5{margin-right:-3rem !important}.mb-xl-n5,.my-xl-n5{margin-bottom:-3rem !important}.ml-xl-n5,.mx-xl-n5{margin-left:-3rem !important}.m-xl-auto{margin:auto !important}.mt-xl-auto,.my-xl-auto{margin-top:auto !important}.mr-xl-auto,.mx-xl-auto{margin-right:auto !important}.mb-xl-auto,.my-xl-auto{margin-bottom:auto !important}.ml-xl-auto,.mx-xl-auto{margin-left:auto !important}}.text-monospace{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace !important}.text-justify{text-align:justify !important}.text-wrap{white-space:normal !important}.text-nowrap{white-space:nowrap !important}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.text-left{text-align:left !important}.text-right{text-align:right !important}.text-center{text-align:center !important}@media (min-width: 576px){.text-sm-left{text-align:left !important}.text-sm-right{text-align:right !important}.text-sm-center{text-align:center !important}}@media (min-width: 768px){.text-md-left{text-align:left !important}.text-md-right{text-align:right !important}.text-md-center{text-align:center !important}}@media (min-width: 992px){.text-lg-left{text-align:left !important}.text-lg-right{text-align:right !important}.text-lg-center{text-align:center !important}}@media (min-width: 1200px){.text-xl-left{text-align:left !important}.text-xl-right{text-align:right !important}.text-xl-center{text-align:center !important}}.text-lowercase{text-transform:lowercase !important}.text-uppercase{text-transform:uppercase !important}.text-capitalize{text-transform:capitalize !important}.font-weight-light{font-weight:300 !important}.font-weight-lighter{font-weight:lighter !important}.font-weight-normal{font-weight:400 !important}.font-weight-bold{font-weight:700 !important}.font-weight-bolder{font-weight:bolder !important}.font-italic{font-style:italic !important}.text-white{color:#fff !important}.text-primary{color:#007bff !important}a.text-primary:hover,a.text-primary:focus{color:#0056b3 !important}.text-secondary{color:#6c757d !important}a.text-secondary:hover,a.text-secondary:focus{color:#494f54 !important}.text-success{color:#28a745 !important}a.text-success:hover,a.text-success:focus{color:#19692c !important}.text-info{color:#17a2b8 !important}a.text-info:hover,a.text-info:focus{color:#0f6674 !important}.text-warning{color:#ffc107 !important}a.text-warning:hover,a.text-warning:focus{color:#ba8b00 !important}.text-danger{color:#dc3545 !important}a.text-danger:hover,a.text-danger:focus{color:#a71d2a !important}.text-light{color:#f8f9fa !important}a.text-light:hover,a.text-light:focus{color:#cbd3da !important}.text-dark{color:#343a40 !important}a.text-dark:hover,a.text-dark:focus{color:#121416 !important}.text-body{color:#212529 !important}.text-muted{color:#6c757d !important}.text-black-50{color:rgba(0,0,0,0.5) !important}.text-white-50{color:rgba(255,255,255,0.5) !important}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.text-decoration-none{text-decoration:none !important}.text-break{word-break:break-word !important;overflow-wrap:break-word 
!important}.text-reset{color:inherit !important}.visible{visibility:visible !important}.invisible{visibility:hidden !important}@media print{*,*::before,*::after{text-shadow:none !important;box-shadow:none !important}a:not(.btn){text-decoration:underline}abbr[title]::after{content:" (" attr(title) ")"}pre{white-space:pre-wrap !important}pre,blockquote{border:1px solid #adb5bd;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}@page{size:a3}body{min-width:992px !important}.container{min-width:992px !important}.navbar{display:none}.badge{border:1px solid #000}.table{border-collapse:collapse !important}.table td,.table th{background-color:#fff !important}.table-bordered th,.table-bordered td{border:1px solid #dee2e6 !important}.table-dark{color:inherit}.table-dark th,.table-dark td,.table-dark thead th,.table-dark tbody+tbody{border-color:#dee2e6}.table .thead-dark th{color:inherit;border-color:#dee2e6}}.highlight table td{padding:5px}.highlight table pre{margin:0}.highlight .cm{color:#999988;font-style:italic}.highlight .cp{color:#999999;font-weight:bold}.highlight .c1{color:#999988;font-style:italic}.highlight .cs{color:#999999;font-weight:bold;font-style:italic}.highlight .c,.highlight .cd{color:#8c8c8c;font-style:italic}.highlight .err{color:#a61717;background-color:#e3d2d2}.highlight .gd{color:#000000;background-color:#ffdddd}.highlight .ge{color:#000000;font-style:italic}.highlight .gr{color:#aa0000}.highlight .gh{color:#999999}.highlight .gi{color:#000000;background-color:#ddffdd}.highlight .go{color:#888888}.highlight .gp{color:#555555}.highlight .gs{font-weight:bold}.highlight .gu{color:#aaaaaa}.highlight .gt{color:#aa0000}.highlight .kc{color:#000000;font-weight:bold}.highlight .kd{color:#000000;font-weight:bold}.highlight .kn{color:#000000;font-weight:bold}.highlight .kp{color:#000000;font-weight:bold}.highlight .kr{color:#000000;font-weight:bold}.highlight .kt{color:#445588;font-weight:bold}.highlight .k,.highlight .kv{color:#000000;font-weight:bold}.highlight .mf{color:#009999}.highlight .mh{color:#009999}.highlight .il{color:#009999}.highlight .mi{color:#009999}.highlight .mo{color:#009999}.highlight .m,.highlight .mb,.highlight .mx{color:#009999}.highlight .sb{color:#d14}.highlight .sc{color:#d14}.highlight .sd{color:#d14}.highlight .s2{color:#d14}.highlight .se{color:#d14}.highlight .sh{color:#d14}.highlight .si{color:#d14}.highlight .sx{color:#d14}.highlight .sr{color:#009926}.highlight .s1{color:#d14}.highlight .ss{color:#990073}.highlight .s{color:#d14}.highlight .na{color:#008080}.highlight .bp{color:#999999}.highlight .nb{color:#0086B3}.highlight .nc{color:#445588;font-weight:bold}.highlight .no{color:#008080}.highlight .nd{color:#3c5d5d;font-weight:bold}.highlight .ni{color:#800080}.highlight .ne{color:#990000;font-weight:bold}.highlight .nf{color:#990000;font-weight:bold}.highlight .nl{color:#990000;font-weight:bold}.highlight .nn{color:#555555}.highlight .nt{color:#000080}.highlight .vc{color:#008080}.highlight .vg{color:#008080}.highlight .vi{color:#008080}.highlight .nv{color:#008080}.highlight .ow{color:#000000;font-weight:bold}.highlight .o{color:#000000;font-weight:bold}.highlight .w{color:#bbbbbb}.highlight{background-color:#f8f8f8}.container{padding-left:30px;padding-right:30px;max-width:1240px}.container-fluid{padding-left:0;padding-right:0}@font-face{font-family:FreightSans;font-weight:700;font-style:normal;src:url("/assets/fonts/FreightSans/freight-sans-bold.woff2") 
format("woff2"),url("/assets/fonts/FreightSans/freight-sans-bold.woff") format("woff")}@font-face{font-family:FreightSans;font-weight:700;font-style:italic;src:url("/assets/fonts/FreightSans/freight-sans-bold-italic.woff2") format("woff2"),url("/assets/fonts/FreightSans/freight-sans-bold-italic.woff") format("woff")}@font-face{font-family:FreightSans;font-weight:500;font-style:normal;src:url("/assets/fonts/FreightSans/freight-sans-medium.woff2") format("woff2"),url("/assets/fonts/FreightSans/freight-sans-medium.woff") format("woff")}@font-face{font-family:FreightSans;font-weight:500;font-style:italic;src:url("/assets/fonts/FreightSans/freight-sans-medium-italic.woff2") format("woff2"),url("/assets/fonts/FreightSans/freight-sans-medium-italic.woff") format("woff")}@font-face{font-family:FreightSans;font-weight:100;font-style:normal;src:url("/assets/fonts/FreightSans/freight-sans-light.woff2") format("woff2"),url("/assets/fonts/FreightSans/freight-sans-light.woff") format("woff")}@font-face{font-family:FreightSans;font-weight:100;font-style:italic;src:url("/assets/fonts/FreightSans/freight-sans-light-italic.woff2") format("woff2"),url("/assets/fonts/FreightSans/freight-sans-light-italic.woff") format("woff")}@font-face{font-family:FreightSans;font-weight:400;font-style:italic;src:url("/assets/fonts/FreightSans/freight-sans-book-italic.woff2") format("woff2"),url("/assets/fonts/FreightSans/freight-sans-book-italic.woff") format("woff")}@font-face{font-family:FreightSans;font-weight:400;font-style:normal;src:url("/assets/fonts/FreightSans/freight-sans-book.woff2") format("woff2"),url("/assets/fonts/FreightSans/freight-sans-book.woff") format("woff")}@font-face{font-family:IBMPlexMono;font-weight:600;font-style:normal;unicode-range:u+0020-007f;src:local("IBMPlexMono-SemiBold"),url("/assets/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2") format("woff2"),url("/assets/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff") format("woff")}@font-face{font-family:IBMPlexMono;font-weight:500;font-style:normal;unicode-range:u+0020-007f;src:local("IBMPlexMono-Medium"),url("/assets/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2") format("woff2"),url("/assets/fonts/IBMPlexMono/IBMPlexMono-Medium.woff") format("woff")}@font-face{font-family:IBMPlexMono;font-weight:400;font-style:normal;unicode-range:u+0020-007f;src:local("IBMPlexMono-Regular"),url("/assets/fonts/IBMPlexMono/IBMPlexMono-Regular.woff2") format("woff2"),url("/assets/fonts/IBMPlexMono/IBMPlexMono-Regular.woff") format("woff")}@font-face{font-family:IBMPlexMono;font-weight:300;font-style:normal;unicode-range:u+0020-007f;src:local("IBMPlexMono-Light"),url("/assets/fonts/IBMPlexMono/IBMPlexMono-Light.woff2") format("woff2"),url("/assets/fonts/IBMPlexMono/IBMPlexMono-Light.woff") format("woff")}*{font-family:FreightSans, Helvetica Neue, Helvetica, Arial, sans-serif;font-weight:400}h1,h2,h3,h4,h5,h6{font-family:FreightSans}p{margin-bottom:1.25rem}a,em,i,b,strong,u,span{font-size:inherit}a:link,a:visited,a:hover{text-decoration:none;color:#ee4c2c}p a:link,p a:visited,p a:hover{color:#ee4c2c;text-decoration:none}@media screen and (min-width: 768px){p a:hover{text-decoration:underline}p 
a.social-icon:hover{text-decoration:none}}.btn,a.btn{border-radius:0;border:none;background-color:#f3f4f7;color:#6c6c6d;font-weight:400;position:relative;letter-spacing:0.25px}.btn.btn-lg,.btn-group-lg>.btn,a.btn.btn-lg,.btn-group-lg>a.btn{font-size:1.125rem;padding-top:.5rem}.btn.btn-white,a.btn.btn-white{background-color:#fff}.btn.btn-orange,a.btn.btn-orange{background-color:#ee4c2c}.btn.btn-demo,a.btn.btn-demo{color:#fff}@media screen and (min-width: 768px){.btn:after,a.btn:after{content:"";display:block;width:0;height:1px;position:absolute;bottom:0;left:0;background-color:#ee4c2c;transition:width .250s ease-in-out}.btn:hover:after,a.btn:hover:after{width:100%}.btn:hover,a.btn:hover{color:#262626}}.navbar{padding-left:0;padding-right:0}html{position:relative;min-height:100%;font-size:12px}@media screen and (min-width: 768px){html{font-size:16px}}@media screen and (min-width: 768px){body{margin:0 0 620px}}body.no-scroll{height:100%;overflow:hidden}a.with-right-arrow,.btn.with-right-arrow{padding-right:2rem;position:relative;background-image:url("/assets/images/chevron-right-orange.svg");background-size:6px 13px;background-position:top 10px right 11px;background-repeat:no-repeat}@media screen and (min-width: 768px){a.with-right-arrow,.btn.with-right-arrow{background-size:8px 14px;background-position:top 15px right 12px;padding-right:2rem}}a.with-left-arrow,.btn.with-left-arrow{padding-left:2rem;position:relative;background-image:url("/assets/images/chevron-left-grey.svg");background-size:6px 13px;background-position:top 10px left 11px;background-repeat:no-repeat}@media screen and (min-width: 768px){a.with-left-arrow,.btn.with-left-arrow{background-size:8px 14px;background-position:top 16px left 12px;padding-left:2rem}}.main-background{position:absolute;top:0;left:0;width:100%;height:350px;background-size:100% 100%;background-repeat:no-repeat;background-image:url("/assets/images/pytorch_bg_purple.jpg")}@media screen and (min-width: 768px){.main-background{height:640px}}.main-background.home-page-background{z-index:-1;height:350px}@media screen and (min-width: 768px){.main-background.home-page-background{height:570px}}.main-background.hub-background{height:380px}@media screen and (min-width: 768px){.main-background.hub-background{height:495px}}@media screen and (min-width: 768px){.main-background.ecosystem-background{height:472px}}@media screen and (min-width: 768px){.main-background.events-background{height:472px}}@media screen and (min-width: 768px){.main-background.ecosystem-join-background{height:435px}}.main-background.resources-background{height:380px}@media screen and (min-width: 768px){.main-background.resources-background{height:472px}}.main-background.get-started-background{height:275px}@media screen and (min-width: 768px){.main-background.get-started-background{height:380px}}.main-background.comm-stories-background{height:275px}@media screen and (min-width: 768px){.main-background.comm-stories-background{height:380px}}.main-background.features-background{height:335px}@media screen and (min-width: 768px){.main-background.features-background{height:300px}}.bg-light-grey{background-color:#f3f4f7}.text-dark-grey{color:#6c6c6d}.sidebar-links .top-section{color:#000}.sidebar-links ul{list-style-type:none;padding-left:0}.sidebar-links ul li{color:#6c6c6d;margin-left:20px}.sidebar-links ul li a{color:inherit}.sidebar-links .with-sub-sections.top-section:before{content:"+ ";font-family:"Courier New", Courier, monospace;width:50px}.sidebar-links 
.with-sub-sections.top-section.open:before{content:"- ";font-family:"Courier New", Courier, monospace;width:50px}.bg-very-light-grey{background-color:#f3f4f7}.email-subscribe-form input.email{color:#ee4c2c;border:none;border-bottom:1px solid #939393;width:100%;background-color:transparent;outline:none;font-size:1.125rem;letter-spacing:0.25px;line-height:2.25rem}.email-subscribe-form ::-webkit-input-placeholder{color:#ee4c2c}.email-subscribe-form ::-moz-placeholder{color:#ee4c2c}.email-subscribe-form :-ms-input-placeholder{color:#ee4c2c}.email-subscribe-form :-moz-placeholder{color:#ee4c2c}.email-subscribe-form input[type="submit"]{position:absolute;right:0;top:10px;height:15px;width:15px;background-image:url("/assets/images/arrow-right-with-tail.svg");background-color:transparent;background-repeat:no-repeat;background-size:15px 15px;background-position:center center;-webkit-appearance:none;-moz-appearance:none;appearance:none;border:0}.email-subscribe-form-fields-wrapper{position:relative}.bg-slate{background-color:#262626}.tweets-wrapper{width:100%}.tweets-wrapper p{font-size:1rem;line-height:1.5rem;letter-spacing:0.22px}.tweets-wrapper ol{padding-left:0}.tweets-wrapper a{color:#ee4c2c}.tweets-wrapper img,.tweets-wrapper .timeline-Tweet-actions,.tweets-wrapper .timeline-Tweet-media,.tweets-wrapper .MediaCard{display:none !important}.tweet{margin-bottom:2.2rem;word-wrap:break-word}.tweet a{color:#ee4c2c;display:inline}.tweet a span{color:inherit}.tweet p,.tweet span{font-size:1rem;line-height:1.5rem;letter-spacing:0.22px;color:#A0A0A1}@media screen and (min-width: 1240px){.tweet p{padding-right:40px}}.tweet span.retweeted,.tweet span.in-reply-to{font-size:.8125rem}.tweet p.tweet-header{margin-bottom:.3125rem;line-height:.75rem}.tweet .tweet-bird:before{content:"";position:relative;left:0;background-image:url("/assets/images/logo-twitter-grey.svg");background-size:20px 16px;display:inline-block;width:20px;height:16px}@media screen and (min-width: 768px){.tweet .tweet-bird:before{margin-bottom:.625rem}}.anchorjs-link{color:#6c6c6d !important}@media screen and (min-width: 768px){.anchorjs-link:hover{color:inherit;text-decoration:none !important}}.article-page-module{background-color:#f3f4f7;padding-top:1.875rem;padding-bottom:1.875rem}@media screen and (min-width: 768px){.article-page-module{padding-top:3.75rem;padding-bottom:3.75rem}}@media screen and (min-width: 1240px){.article-page-module .col-md-3{padding-left:20px;padding-right:20px}}.article-page-module .module-link-col .btn{padding-left:0}@media screen and (min-width: 768px){.article-page-module .module-link-col{text-align:right}.article-page-module .module-link-col .btn{padding-left:inherit}}.article-page-module .module-content-wrapper{margin-top:1.25rem;margin-bottom:1.25rem}@media screen and (min-width: 768px){.article-page-module .module-content-wrapper{margin-top:0;margin-bottom:0}}.article-page-module img{margin-bottom:1.875rem;width:100%}.article-page-module h3{font-size:1.5rem;letter-spacing:1.33px;line-height:2rem;text-transform:uppercase;margin-bottom:1.25rem}@media screen and (min-width: 768px){.article-page-module h3{margin-bottom:3.75rem}}.article-page-module h5,.article-page-module p{font-size:1rem;line-height:1.5rem}.article-page-module h5{color:#262626}.article-page-module p{color:#CCCDD1;letter-spacing:0.25px}.article-page-module .module-header{position:relative}.article-page-module .module-button{padding-left:0}@media screen and (min-width: 768px){.article-page-module 
.module-button{position:absolute;right:15px;top:0;padding-top:0;padding-bottom:.125rem;background-position:center right;padding-right:16px}}article.pytorch-article .note-card{border-radius:0;border:none;background-color:#ee4c2c;color:white;padding:30px;margin-bottom:50px}article.pytorch-article .note-card h4{font-size:1.5rem;letter-spacing:1.33px;line-height:2rem;text-transform:uppercase;color:white;margin-top:0;margin-bottom:1.125rem}article.pytorch-article .note-card p{font-size:1.125rem;line-height:1.5em;margin-bottom:0;color:white}article.pytorch-article .note-card p a{color:white;font-weight:700}.ecosystem-card,.resource-card,.hub-card{border-radius:0;border:none;height:110px;margin-bottom:1.25rem;margin-bottom:1.875rem;overflow:scroll}@media screen and (min-width: 1240px){.ecosystem-card,.resource-card,.hub-card{height:150px;overflow:inherit}}@media (min-width: 768px) and (max-width: 1239px){.ecosystem-card,.resource-card,.hub-card{height:170px;overflow:inherit}}.ecosystem-card p.card-summary,.resource-card p.card-summary,.hub-card p.card-summary{font-size:1.125rem;line-height:1.5rem;margin-bottom:0;color:#6c6c6d}.ecosystem-card h4,.resource-card h4,.hub-card h4{color:#262626;margin-bottom:1.125rem;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.ecosystem-card a,.resource-card a,.hub-card a{height:100%}@media screen and (min-width: 768px){.ecosystem-card a,.resource-card a,.hub-card a{min-height:190px}}@media (min-width: 768px) and (max-width: 1239px){.ecosystem-card a,.resource-card a,.hub-card a{min-height:234px}}@media screen and (min-width: 768px){.ecosystem-card:after,.resource-card:after,.hub-card:after{content:"";display:block;width:0;height:1px;position:absolute;bottom:0;left:0;background-color:#ee4c2c;transition:width .250s ease-in-out}.ecosystem-card:hover:after,.resource-card:hover:after,.hub-card:hover:after{width:100%}.ecosystem-card:hover,.resource-card:hover,.hub-card:hover{color:#262626}}.ecosystem-card:hover p.card-summary,.resource-card:hover p.card-summary,.hub-card:hover p.card-summary{color:#262626}.ecosystem-card .card-body{background-position:top 1.25rem right 1.25rem;background-repeat:no-repeat;padding:1.5625rem 1.875rem}.ecosystem-card .card-body.reasoning{background-image:url("/assets/images/logo-elf.svg");background-size:29px 25px}.ecosystem-card .card-body.tool{background-image:url("/assets/images/logo-wav2letter.svg");background-size:29px 25px}.ecosystem-card .card-body.language{background-image:url("/assets/images/logo-parlai.svg");background-size:29px 25px}.ecosystem-card .card-body.vision{background-image:url("/assets/images/logo-detectron.svg");background-size:29px 25px}.resource-card{border:1px solid #d6d7d8;background-color:transparent;margin-bottom:1.25rem}@media screen and (min-width: 768px){.resource-card{margin-bottom:0}}@media (min-width: 768px) and (max-width: 1239px){.resource-card{height:225px}}.resource-card .pytorch-image{position:relative;height:1.25rem;width:1.25rem;top:3.125rem}.resource-card a{letter-spacing:0.25px;color:#262626}.resource-card .card-body{display:block;padding:0 15px 0 0;position:relative;top:20px;margin-left:60px}@media (min-width: 768px) and (max-width: 1239px){.resource-card .card-body{top:18px}}@media screen and (min-width: 1240px){.resource-card .card-body{top:30px;margin-left:80px;padding-right:30px}}.resource-card.slack:before,.resource-card.github:before,.resource-card.pytorch-resource:before{content:"";background-size:32px 
32px;background-repeat:no-repeat;display:block;position:absolute;height:32px;width:32px;top:15px;left:15px}@media screen and (min-width: 1240px){.resource-card.slack:before,.resource-card.github:before,.resource-card.pytorch-resource:before{left:30px;top:30px}}.resource-card.slack:before{background-image:url("/assets/images/logo-slack.svg")}.resource-card.github:before{background-image:url("/assets/images/logo-github.svg")}.resource-card.pytorch-resource:before{background-image:url("/assets/images/logo-icon.svg")}.resource-card .pytorch-discuss .discuss{color:#ee4c2c;font-weight:400}@media screen and (min-width: 768px){.resource-card:after{content:"";display:block;width:0;height:1px;position:absolute;bottom:0;left:0;background-color:#ee4c2c;transition:width .250s ease-in-out}.resource-card:hover:after{width:100%}.resource-card:hover{color:#262626}}.article-page-module.similar-projects .ecosystem-card p.card-summary{font-size:1rem;height:36px}@media screen and (min-width: 768px){.article-page-module.similar-projects .ecosystem-card p.card-summary{height:50px}}#twitter-widget iframe{display:none !important}body.general .main-content-wrapper{margin-top:80px}@media screen and (min-width: 768px){body.general .main-content-wrapper{margin-top:100px}}.domain-card{background-color:#f3f4f7;padding:40px 20px;margin:20px 0}.domain-card h4{color:#000}.domain-card p{color:#6c6c6d;margin-bottom:0}.domain-card:hover h4{color:#ee4c2c}code,kbd,pre,samp,code b{font-family:IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace}code span,kbd span,pre span,samp span,code b span{font-family:IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace}pre{padding:1.125rem;background-color:#f3f4f7}pre code{font-size:.875rem}pre.highlight{background-color:#f3f4f7;line-height:1.3125rem}code.highlighter-rouge{color:#6c6c6d;background-color:#f3f4f7;padding:2px 6px}a:link code.highlighter-rouge,a:visited code.highlighter-rouge,a:hover code.highlighter-rouge{color:#4974D1}a:link.has-code,a:visited.has-code,a:hover.has-code{color:#4974D1}p code,h1 code,h2 code,h3 code,h4 code,h5 code,h6 code{font-size:78.5%}.header-holder{height:68px;align-items:center;display:flex;left:0;margin-left:auto;margin-right:auto;position:fixed;right:0;top:0;width:100%;z-index:9999}@media screen and (min-width: 1200px){.header-holder{height:70px}}@media screen and (min-width: 1200px){.header-holder{top:32px}}.header-holder.blog-header,.header-holder.blog-detail-header,.header-holder.resources-header,.header-holder.get-started-header,.header-holder.features-header,.header-holder.comm-stories-header,.header-holder.ecosystem-header,.header-holder.announcement-header,.header-holder.hub-header,.header-holder.mobile-header{background-color:#fff;border-bottom:1px solid #e2e2e2}.hello-bar{display:none}@media screen and (min-width: 1200px){.hello-bar{background-color:#CC2F90;color:#fff;display:flex;letter-spacing:.34px;justify-content:center;padding:4px 0;position:fixed;top:0;text-align:center;z-index:9999;margin-left:auto;margin-right:auto;width:100%}.hello-bar a{color:#fff;text-decoration:underline}}.header-container{position:relative;display:flex;align-items:center}.header-container:before,.header-container:after{content:"";display:table}.header-container:after{clear:both}.header-container{*zoom:1}@media screen and (min-width: 
1200px){.header-container{display:block}}.header-logo{height:23px;width:93px;background-image:url("/assets/images/logo.svg");background-repeat:no-repeat;background-size:93px 23px;display:block;float:left}@media screen and (min-width: 1200px){.header-logo{background-size:108px 27px;position:absolute;height:27px;width:108px;top:4px;float:none}}.main-menu-open-button{background-image:url("/assets/images/icon-menu-dots.svg");background-position:center center;background-size:25px 7px;background-repeat:no-repeat;width:25px;height:7px;position:absolute;right:0;top:4px}@media screen and (min-width: 1200px){.main-menu-open-button{display:none}}.header-holder .main-menu{display:none}@media screen and (min-width: 1200px){.header-holder .main-menu{display:flex;align-items:center;justify-content:flex-end}}.header-holder .main-menu ul{display:flex;align-items:center;margin:0}.header-holder .main-menu ul li{display:inline-block;margin-right:34px;position:relative}.header-holder .main-menu ul li.active:after{content:"•";bottom:-24px;color:#ee4c2c;font-size:1.375rem;left:0;position:absolute;right:0;text-align:center}.header-holder .main-menu ul li.active a{color:#ee4c2c}.header-holder .main-menu ul li.active .with-down-arrow{background-image:url("/assets/images/chevron-down-orange.svg")}.header-holder .main-menu ul li.resources-active:after{left:-27px}.header-holder .main-menu ul li:last-of-type{margin-right:0}.header-holder .main-menu ul li a{color:#fff;font-size:1.2rem;letter-spacing:0;line-height:2.125rem;text-align:center;text-decoration:none;padding-bottom:10px}@media screen and (min-width: 1200px){.header-holder .main-menu ul li a:hover{color:#ffffff;border-bottom:2px solid #ffffff}}.header-holder .main-menu ul li a.with-down-arrow{cursor:default;padding-right:2rem;position:relative;background-image:url("/assets/images/chevron-down-white.svg");background-size:14px 18px;background-position:top 7px right 10px;background-repeat:no-repeat;padding-bottom:20px}.header-holder .main-menu ul li a.with-down-arrow:hover{border-bottom:none}.header-holder .main-menu ul li a.with-down-arrow .dropdown-menu{border-radius:0;padding:0}.header-holder .main-menu ul li a.with-down-arrow .dropdown-menu .dropdown-item{color:#6c6c6d;border-bottom:1px solid #e2e2e2}.header-holder .main-menu ul li a.with-down-arrow .dropdown-menu .dropdown-item:last-of-type{border-bottom-color:transparent}.header-holder .main-menu ul li a.with-down-arrow .dropdown-menu .dropdown-item:hover{background-color:#ee4c2c}.header-holder .main-menu ul li a.with-down-arrow .dropdown-menu .dropdown-item p{font-size:1rem;color:#757575}.header-holder .main-menu ul li a.with-down-arrow .dropdown-menu a.dropdown-item:hover{color:#fff}.header-holder .main-menu ul li a.with-down-arrow .dropdown-menu a.dropdown-item:hover p{color:#fff}.mobile-main-menu{display:none}.mobile-main-menu.open{background-color:#262626;display:block;height:100%;left:0;margin-left:auto;margin-right:auto;min-height:100%;position:fixed;right:0;top:0;width:100%;z-index:99999}.mobile-main-menu .container-fluid{background-color:inherit;align-items:center;display:flex;height:68px;position:relative;z-index:1}.mobile-main-menu .container-fluid:before,.mobile-main-menu .container-fluid:after{content:"";display:table}.mobile-main-menu .container-fluid:after{clear:both}.mobile-main-menu .container-fluid{*zoom:1}.mobile-main-menu.open ul{list-style-type:none;padding:0}.mobile-main-menu.open ul li a,.mobile-main-menu.open 
.resources-mobile-menu-title{font-size:2rem;color:#fff;letter-spacing:0;line-height:4rem}.mobile-main-menu.open ul li.active a{color:#ee4c2c}.main-menu-close-button{background-image:url("/assets/images/icon-close.svg");background-position:center center;background-repeat:no-repeat;background-size:24px 24px;height:24px;position:absolute;right:0;width:24px;top:-4px}.mobile-main-menu-header-container{position:relative}.mobile-main-menu-links-container{display:flex;padding-left:2.8125rem;height:100%;min-height:100%;margin-top:20px;overflow-y:scroll}@media only screen and (max-width: 320px){.mobile-main-menu-links-container .main-menu{padding-top:5rem}}@media only screen and (max-width: 320px){.mobile-main-menu-links-container .navSearchWrapper{width:75%}}#topnav-gh-icon{background-image:url(/assets/social/github-white.svg);color:white;width:33px;height:33px;background-size:23px 23px;background-repeat:no-repeat;background-position:5px 4px;border-radius:25px}#topnav-gh-icon:hover{background-color:#88888833}.blog-header .header-logo,.blog-detail-header .header-logo,.resources-header .header-logo,.get-started-header .header-logo,.features-header .header-logo,.ecosystem-header .header-logo,.announcement-header .header-logo,.comm-stories-header .header-logo,.hub-header .header-logo,.mobile-header .header-logo{background-image:url("/assets/images/logo-dark.svg")}.blog-header .main-menu ul li a,.blog-detail-header .main-menu ul li a,.resources-header .main-menu ul li a,.get-started-header .main-menu ul li a,.features-header .main-menu ul li a,.ecosystem-header .main-menu ul li a,.announcement-header .main-menu ul li a,.comm-stories-header .main-menu ul li a,.hub-header .main-menu ul li a,.mobile-header .main-menu ul li a{color:#262626}@media screen and (min-width: 1200px){.blog-header .main-menu ul li a:hover,.blog-detail-header .main-menu ul li a:hover,.resources-header .main-menu ul li a:hover,.get-started-header .main-menu ul li a:hover,.features-header .main-menu ul li a:hover,.ecosystem-header .main-menu ul li a:hover,.announcement-header .main-menu ul li a:hover,.comm-stories-header .main-menu ul li a:hover,.hub-header .main-menu ul li a:hover,.mobile-header .main-menu ul li a:hover{color:#262626;border-bottom:2px solid #262626}}.blog-header .main-menu ul li a.with-down-arrow,.blog-detail-header .main-menu ul li a.with-down-arrow,.resources-header .main-menu ul li a.with-down-arrow,.get-started-header .main-menu ul li a.with-down-arrow,.features-header .main-menu ul li a.with-down-arrow,.ecosystem-header .main-menu ul li a.with-down-arrow,.announcement-header .main-menu ul li a.with-down-arrow,.comm-stories-header .main-menu ul li a.with-down-arrow,.hub-header .main-menu ul li a.with-down-arrow,.mobile-header .main-menu ul li a.with-down-arrow{background-image:url("/assets/images/chevron-down-black.svg")}.blog-header .main-menu-open-button,.blog-detail-header .main-menu-open-button,.resources-header .main-menu-open-button,.get-started-header .main-menu-open-button,.features-header .main-menu-open-button,.ecosystem-header .main-menu-open-button,.announcement-header .main-menu-open-button,.comm-stories-header .main-menu-open-button,.hub-header .main-menu-open-button,.mobile-header .main-menu-open-button{background-image:url("/assets/images/icon-menu-dots-dark.svg")}.blog-header #topnav-gh-icon,.blog-detail-header #topnav-gh-icon,.resources-header #topnav-gh-icon,.get-started-header #topnav-gh-icon,.features-header #topnav-gh-icon,.ecosystem-header #topnav-gh-icon,.announcement-header 
#topnav-gh-icon,.comm-stories-header #topnav-gh-icon,.hub-header #topnav-gh-icon,.mobile-header #topnav-gh-icon{background-image:url(/assets/social/github-black.svg)}.ecosystem-dropdown-menu,.resources-dropdown-menu{left:-25px;width:300px;display:none;position:absolute;z-index:1000;display:none;top:45px;float:left;min-width:10rem;padding:0.5rem 0;font-size:1rem;color:#212529;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.15);border-radius:0.25rem}.ecosystem-dropdown:hover .ecosystem-dropdown-menu,.ecosystem-dropdown:hover .resources-dropdown-menu,.resources-dropdown:hover .ecosystem-dropdown-menu,.resources-dropdown:hover .resources-dropdown-menu,.resources-active:hover .ecosystem-dropdown-menu,.resources-active:hover .resources-dropdown-menu{display:block}.main-menu ul li .ecosystem-dropdown-menu,.main-menu ul li .resources-dropdown-menu{border-radius:0;padding:0}.main-menu ul li .ecosystem-dropdown-menu .dropdown-item,.main-menu ul li .resources-dropdown-menu .dropdown-item{color:#6c6c6d;border-bottom:1px solid #e2e2e2}.header-holder .main-menu ul li a.nav-dropdown-item{display:block;font-size:1rem;line-height:1.3125rem;width:100%;padding:0.25rem 1.5rem;clear:both;font-weight:400;color:#757575;text-align:left;background-color:transparent;border-bottom:1px solid #e2e2e2}.header-holder .main-menu ul li a.nav-dropdown-item p{margin-bottom:.5rem}.header-holder .main-menu ul li a.nav-dropdown-item:last-of-type{border-bottom-color:transparent}.header-holder .main-menu ul li a.nav-dropdown-item:hover{background-color:#ee4c2c;color:white}.header-holder .main-menu ul li a.nav-dropdown-item .dropdown-title{font-size:1.125rem;color:#212529;letter-spacing:0;line-height:34px}.header-holder .main-menu ul li a.nav-dropdown-item .docs-title{display:block;padding-top:0.5rem}.header-holder .main-menu ul li a.nav-dropdown-item:hover .dropdown-title{background-color:#ee4c2c;color:white}.mobile-main-menu-links-container ul.resources-mobile-menu-items li{padding-left:15px}.mobile-main-menu-links-container ul.resources-mobile-menu-items li a{font-size:1.5rem;line-height:3rem}.jumbotron{background-color:transparent;position:absolute;left:0;right:0;margin-right:auto;margin-left:auto;padding:0;margin-bottom:0;display:flex;align-items:center;top:68px}@media screen and (min-width: 768px){.jumbotron{height:550px;top:90px}}.jumbotron .jumbotron-content{display:flex;align-items:center}.jumbotron .lead{font-weight:400;letter-spacing:0.25px;font-size:20px;line-height:1.2}@media screen and (min-width: 768px){.jumbotron .lead{font-size:29px}}.jumbotron h1{font-size:2rem;text-transform:uppercase;font-weight:lighter;letter-spacing:1.08px;margin-bottom:.625rem;line-height:1.05;margin-top:4rem}@media screen and (min-width: 768px){.jumbotron h1{font-size:3.875rem;margin-top:0}}.jumbotron h1 img{margin-bottom:1rem}.jumbotron p{font-size:1.125rem;margin-bottom:1.25rem}@media screen and (min-width: 1200px){.jumbotron p{width:50%}}.jumbotron.on-dark-background h1,.jumbotron.on-dark-background p{color:#fff}.jumbotron .btn{padding-top:.5625rem}@media screen and (min-width: 768px){.jumbotron .btn{margin-top:.625rem}}.homepage .main-content-wrapper{margin-top:315px}@media screen and (min-width: 768px){.homepage .main-content-wrapper{margin-top:472px}}.homepage h2{margin-bottom:1.5625rem;text-transform:uppercase;letter-spacing:1.78px;line-height:2.5rem}@media screen and (min-width: 768px){.homepage h2{margin-bottom:2.0625rem}}.homepage 
h3{font-size:1.5rem;letter-spacing:1.33px;line-height:2rem;text-transform:uppercase;margin-bottom:1.25rem}.homepage h5{margin-bottom:.5rem}@media screen and (min-width: 768px){.homepage h5{margin-bottom:.9375rem}}.homepage .jumbotron{height:195px}@media screen and (min-width: 768px){.homepage .jumbotron{height:395px}}.homepage .jumbotron .btn{margin-top:.375rem}.homepage .ecosystem-row .card{background-color:#f3f4f7}.homepage .homepage-header{background-color:rgba(0,0,0,0.165)}.homepage-feature-module{padding-top:2.5rem;padding-bottom:2.5rem}@media screen and (min-width: 768px){.homepage-feature-module{padding-top:3.875rem;padding-bottom:4.5rem}.homepage-feature-module .module-button{position:absolute;right:15px;top:0}}.homepage-feature-module p{color:#6c6c6d;font-size:1.125em}.homepage-feature-module .title{color:#000;font-weight:300;font-size:1.5rem}@media (min-width: 768px) and (max-width: 1239px){.homepage-feature-module .title{font-size:1.25rem}}.homepage-feature-module .pytorch-title{font-size:1.5rem;letter-spacing:0.33px;line-height:2.25rem}.homepage-feature-module .subtext{font-size:1.125rem;color:#8c8c8c;letter-spacing:0;line-height:1.5rem}@media (min-width: 768px) and (max-width: 1239px){.homepage-feature-module .subtext{font-size:.9375rem}}.key-features-module{padding-bottom:0}@media screen and (min-width: 768px){.key-features-module{padding-bottom:1.55rem}}.key-features-module .key-features-boxes{margin-top:2rem}@media screen and (min-width: 768px){.key-features-module .key-features-boxes{margin-top:0}}.key-features-module .key-feature-box{margin-bottom:2rem}.key-features-module .key-feature-box p{margin-bottom:0;letter-spacing:0.25px}@media screen and (min-width: 768px){.key-features-module .key-feature-box{margin-bottom:2.5rem}}.community-heading{margin-top:2rem}.community-module{background-color:#fff}.community-module .ecosystem-card{height:auto}@media (min-width: 768px) and (max-width: 1239px){.community-module .ecosystem-card{padding:.625rem}}.community-module h2{margin-bottom:0}.community-module h5{text-transform:uppercase;color:#c6000a;margin-bottom:1.25rem}.community-module .h2-subheadline{margin-top:1.25rem;margin-bottom:2.6rem}@media screen and (min-width: 768px){.community-module .h2-subheadline{margin-top:0}}@media (min-width: 768px) and (max-width: 1239px){.community-module .card-body{padding:.625rem}}.community-module .module-button{background-color:#f3f4f7}.community-module p{margin-bottom:2.5rem;letter-spacing:0.25px}.community-module .module-subtext{margin-right:15.625rem}.community-module .email-subscribe-form input.email{border-bottom:1px solid #d6d7d8;font-size:1.25rem;line-height:0;padding-bottom:.75rem}.community-module .email-subscribe-form input[type="submit"]{top:6px}@media screen and (min-width: 768px){.community-module .email-subscribe-form input[type="submit"]{top:10px}}.pytorch-users-module,.homepage-bottom-wrapper{background-color:#f3f4f7}@media screen and (min-width: 768px){.pytorch-users-module{padding-bottom:1.9rem}}.community-avatar{height:60px;width:60px}.community-logo-bottom{height:200px;background-color:#f3f4f7}.university-testimonials h2{margin-bottom:2.2rem}.university-testimonials-content{margin-top:2.5rem;margin-bottom:2rem}@media screen and (min-width: 768px){.university-testimonials-content{margin-top:0}}.university-testimonials-content .col-md-4{margin-bottom:2.5rem}.university-testimonials-content .case-study-title{font-size:1.5rem;margin-bottom:1.25rem}.university-testimonials-content 
p{color:#6c6c6d;font-size:1.125rem;letter-spacing:0.25px}.university-testimonials-content .btn{background-color:#fff}.follow-us-on-twitter h2{margin-bottom:1.25rem}@media screen and (min-width: 768px){.follow-us-on-twitter h2{margin-bottom:2.5rem}}.homepage-feature-module .tweets-wrapper p{font-size:1rem}.quick-starts p{font-size:1.125rem;line-height:1.75rem}.quick-start-guides{font-size:1.5rem;letter-spacing:0.25px;line-height:2.25rem;color:#a5a5a5}.quick-start-guides .step-counter{margin-bottom:.1875rem}.quick-start-guides ul{list-style-type:none;padding-left:0}.quick-start-guides ul li{margin-bottom:0;font-size:1.125rem}@media screen and (min-width: 768px){.quick-start-guides ul li{margin-bottom:.75rem}.quick-start-guides ul li:last-of-type{margin-bottom:0}}.quick-start-guides ul li.selected{color:#ee4c2c}.quick-start-guides ul li.selected:before{content:"\2022";position:absolute;left:0}@media screen and (min-width: 768px){.quick-start-guides ul li.selected:before{left:-5px}}.quick-start-guides .select-instructions{color:#262626;border-bottom:2px solid #a5a5a5;margin-bottom:1rem;font-size:1.125rem;display:inline-block}@media screen and (min-width: 768px){.quick-start-guides .select-instructions{margin-bottom:0}}.homepage .news-banner-container{background:#000;color:#fff;text-align:center;padding:20px;width:90%}.homepage .news-banner-container .right-arrow,.homepage .news-banner-container .left-arrow{height:15px;bottom:-3px;position:relative}@media screen and (min-width: 768px){.homepage .news-banner-container .right-arrow,.homepage .news-banner-container .left-arrow{bottom:-8px}}.homepage .news-banner-container .right-arrow:hover,.homepage .news-banner-container .left-arrow:hover{cursor:pointer}.homepage .news-banner-container .right-arrow{float:right}.homepage .news-banner-container .left-arrow{float:left}.homepage #news-items .pagination{display:none !important}.banner-info{display:inline-block;overflow:hidden;text-overflow:ellipsis;white-space:nowrap;margin:auto;width:80%;font-size:1.125rem}@media screen and (min-width: 768px){.banner-info{padding-top:3px}}.banner-info:hover{cursor:pointer;color:#ee4c2c}.news-banner-text a{color:white}.news-banner-text a:hover{color:#ee4c2c}.no-banner{padding-bottom:2rem}.homepage-box-module div.col-md{background:#F3F4F7;margin:10px;padding:30px}@media screen and (min-width: 768px){.homepage-box-module div.col-md{margin:20px}}.site-footer{padding:3.75rem 0;width:100%;background:#000;background-size:100%;margin-left:0;margin-right:0}@media screen and (min-width: 768px){.site-footer{position:absolute;left:0;bottom:0;height:620px}}.site-footer p{color:#fff}.site-footer ul{list-style-type:none;padding-left:0;margin-bottom:0}.site-footer ul li{font-size:1.125rem;line-height:2rem;color:#A0A0A1;padding-bottom:.375rem}.site-footer ul li.list-title{padding-bottom:.75rem;color:#fff}.site-footer ul li.list-title p{margin-bottom:0}.site-footer a:link,.site-footer a:visited{color:inherit}@media screen and (min-width: 768px){.site-footer a:hover{color:#ee4c2c}}.site-footer .privacy-policy{background:#000000;border-top:1px solid #fff;display:flex;flex-direction:column;margin-top:40px}.site-footer .privacy-policy ul{border-bottom:1px solid white}.site-footer .privacy-policy ul .privacy-policy-links{padding-bottom:1rem;padding-top:1rem;padding-right:1rem;display:inline-flex;color:white}.site-footer .privacy-policy .copyright{padding-top:1rem}.site-footer .privacy-policy .copyright p{color:#dfdfdf;font-size:14px}.site-footer .privacy-policy .copyright 
a{color:#dfdfdf;font-weight:600}.site-footer .privacy-policy .copyright a:hover{color:#dfdfdf;font-weight:600}.docs-tutorials-resources{background-color:#262626;color:#fff;padding-top:2.5rem;padding-bottom:2.5rem}@media screen and (min-width: 768px){.docs-tutorials-resources{padding-top:4.125rem;padding-bottom:4.09rem}}.docs-tutorials-resources h2{font-size:1.5rem;letter-spacing:-0.25px;text-transform:none;margin-bottom:0.25rem}@media screen and (min-width: 768px){.docs-tutorials-resources h2{margin-bottom:1.25rem}}.docs-tutorials-resources .col-md-4{margin-bottom:2rem}@media screen and (min-width: 768px){.docs-tutorials-resources .col-md-4{margin-bottom:0}}.docs-tutorials-resources .with-right-arrow{margin-left:12px;background-position:top 3px right 11px}@media screen and (min-width: 768px){.docs-tutorials-resources .with-right-arrow{background-position:top 6px right 11px}}.docs-tutorials-resources .with-right-arrow:hover{background-image:url("/assets/images/chevron-right-white.svg")}.docs-tutorials-resources p{font-size:1rem;line-height:1.5rem;letter-spacing:0.22px;color:#A0A0A1;margin-bottom:.5rem}@media screen and (min-width: 768px){.docs-tutorials-resources p{margin-bottom:1.25rem}}.docs-tutorials-resources a{font-size:1.125rem;color:#ee4c2c}.docs-tutorials-resources a:hover{color:#fff}.footer-container{position:relative}.footer-logo-wrapper{display:none}@media screen and (min-width: 768px){.footer-logo-wrapper{display:flex;grid-column:span 6}}.footer-logo-wrapper .footer-logo img{width:40px}.footer-links-wrapper{display:flex;flex-wrap:wrap;padding-bottom:1rem;border-bottom:1px solid white}@media screen and (min-width: 768px){.footer-links-wrapper{flex-wrap:initial;justify-content:flex-end}}.footer-links-col{margin-bottom:3.75rem;width:50%}@media screen and (min-width: 768px){.footer-links-col{margin-bottom:0;width:14%;margin-right:23px}.footer-links-col.follow-us-col{width:18%;margin-right:0}}@media (min-width: 768px) and (max-width: 1239px){.footer-links-col{width:18%;margin-right:30px}}.footer-social-icons{margin:8.5625rem 0 2.5rem 0}.footer-social-icons a{height:32px;width:32px;display:inline-block;background-color:#CCCDD1;border-radius:50%;margin-right:5px}.footer-social-icons a.facebook{background-image:url("/assets/images/logo-facebook-dark.svg");background-position:center center;background-size:9px 18px;background-repeat:no-repeat}.footer-social-icons a.twitter{background-image:url("/assets/images/logo-twitter-dark.svg");background-position:center center;background-size:17px 17px;background-repeat:no-repeat}.footer-social-icons a.youtube{background-image:url("/assets/images/logo-youtube-dark.svg");background-position:center center;background-repeat:no-repeat}.site-footer .mc-field-group{margin-top:-2px}.site-footer .email-subscribe-form input[type="submit"]{top:9px}@media screen and (min-width: 768px){.site-footer .email-subscribe-form input[type="submit"]{top:13px}}.social-links{grid-column:span 12;display:grid;grid-column-gap:3%;grid-row-gap:30px;grid-template-columns:repeat(6, minmax(0, 1fr))}@media (min-width: 600px){.social-links{grid-column:span 8}}@media screen and (min-width: 768px){.social-links{grid-column:span 6;align-self:end}}@media (max-width: 999px){.social-links{margin-left:10px;margin-right:10px}}.social-links li{text-align:center}.social-links svg{height:25px;max-width:30px;fill:#fff;color:#fff}.social-links svg:hover{fill:#ee4c2c;color:#ee4c2c}.lf-grid{grid-column-gap:3%;grid-row-gap:30px;display:grid;grid-template-columns:repeat(12, 
1fr)}.hs-recaptcha{display:none}.newsletter{line-height:140%;margin-bottom:80px}.newsletter__title{line-height:140%;font-size:24px}@media (min-width: 1000px){.newsletter__title{font-size:40px}}.newsletter .legal-consent-container{display:none}.newsletter p.newsletter__privacy{max-width:860px;margin-top:30px;line-height:21px;font-size:14px;color:#dfdfdf}.newsletter p.newsletter__privacy a{color:#dfdfdf;font-weight:600}.newsletter p.newsletter__privacy a:hover{color:#dfdfdf;font-weight:600}.newsletter .hbspt-form{min-height:300px}@media (min-width: 500px){.newsletter .hbspt-form{min-height:100px}}@media (min-width: 1000px){.newsletter .hbspt-form{min-height:20px}}.newsletter .hbspt-form .hs-error-msg{display:block;margin-right:8px;color:#ee4c2c;font-size:14px;line-height:1.1em;width:95%;padding-top:15px}.newsletter .hbspt-form .hs-form{display:grid;grid-template-columns:1fr;grid-gap:30px}@media (min-width: 500px){.newsletter .hbspt-form .hs-form{grid-template-columns:minmax(0, 1fr) minmax(0, 1fr)}}@media (min-width: 700px){.newsletter .hbspt-form .hs-form{grid-template-columns:repeat(3, minmax(0, 1fr))}}@media (min-width: 950px){.newsletter .hbspt-form .hs-form{grid-template-columns:1fr 1fr 1fr 1fr 1fr;grid-row-gap:1.5rem;grid-column-gap:1.5rem}}.newsletter .hbspt-form .hs-form input[type='text'],.newsletter .hbspt-form .hs-form input[type='email']{height:50px;width:100%;background:transparent;border:none;border-bottom:2px solid #fff;border-radius:0;transition:all 0.25s ease;color:#fff;font-size:16px;line-height:105%}@media (min-width: 500px){.newsletter .hbspt-form .hs-form input[type='text'],.newsletter .hbspt-form .hs-form input[type='email']{height:42px}}@media (min-width: 500px){.newsletter .hbspt-form .hs-form input[type='text'],.newsletter .hbspt-form .hs-form input[type='email']{font-size:20px}}.newsletter .hbspt-form .hs-form input[type='text']::-moz-placeholder, .newsletter .hbspt-form .hs-form input[type='email']::-moz-placeholder{color:#fff;font-size:16px;line-height:105%}.newsletter .hbspt-form .hs-form input[type='text']:-ms-input-placeholder, .newsletter .hbspt-form .hs-form input[type='email']:-ms-input-placeholder{color:#fff;font-size:16px;line-height:105%}.newsletter .hbspt-form .hs-form input[type='text']::-ms-input-placeholder, .newsletter .hbspt-form .hs-form input[type='email']::-ms-input-placeholder{color:#fff;font-size:16px;line-height:105%}.newsletter .hbspt-form .hs-form input[type='text']::placeholder,.newsletter .hbspt-form .hs-form input[type='email']::placeholder{color:#fff;font-size:16px;line-height:105%}@media (min-width: 500px){.newsletter .hbspt-form .hs-form input[type='text']::-moz-placeholder, .newsletter .hbspt-form .hs-form input[type='email']::-moz-placeholder{font-size:20px}.newsletter .hbspt-form .hs-form input[type='text']:-ms-input-placeholder, .newsletter .hbspt-form .hs-form input[type='email']:-ms-input-placeholder{font-size:20px}.newsletter .hbspt-form .hs-form input[type='text']::-ms-input-placeholder, .newsletter .hbspt-form .hs-form input[type='email']::-ms-input-placeholder{font-size:20px}.newsletter .hbspt-form .hs-form input[type='text']::placeholder,.newsletter .hbspt-form .hs-form input[type='email']::placeholder{font-size:20px}}.newsletter .hbspt-form .hs-form input[type='text']:focus,.newsletter .hbspt-form .hs-form input[type='email']:focus{outline:0;border-bottom:2px solid #ee4c2c;transition:color 0.25s ease}.newsletter .hbspt-form .hs-form input[type='text']:focus::-moz-placeholder, .newsletter .hbspt-form .hs-form 
input[type='email']:focus::-moz-placeholder{-moz-transition:color 0.25s ease;transition:color 0.25s ease;color:transparent}.newsletter .hbspt-form .hs-form input[type='text']:focus:-ms-input-placeholder, .newsletter .hbspt-form .hs-form input[type='email']:focus:-ms-input-placeholder{-ms-transition:color 0.25s ease;transition:color 0.25s ease;color:transparent}.newsletter .hbspt-form .hs-form input[type='text']:focus::-ms-input-placeholder, .newsletter .hbspt-form .hs-form input[type='email']:focus::-ms-input-placeholder{-ms-transition:color 0.25s ease;transition:color 0.25s ease;color:transparent}.newsletter .hbspt-form .hs-form input[type='text']:focus::placeholder,.newsletter .hbspt-form .hs-form input[type='email']:focus::placeholder{transition:color 0.25s ease;color:transparent}.newsletter .hbspt-form .hs-form input:-webkit-autofill,.newsletter .hbspt-form .hs-form input:-webkit-autofill:hover,.newsletter .hbspt-form .hs-form input:-webkit-autofill:focus,.newsletter .hbspt-form .hs-form textarea:-webkit-autofill,.newsletter .hbspt-form .hs-form textarea:-webkit-autofill:hover,.newsletter .hbspt-form .hs-form textarea:-webkit-autofill:focus,.newsletter .hbspt-form .hs-form select:-webkit-autofill,.newsletter .hbspt-form .hs-form select:-webkit-autofill:hover,.newsletter .hbspt-form .hs-form select:-webkit-autofill:focus{-webkit-text-fill-color:#fff}.newsletter .hbspt-form .hs-form select{-webkit-appearance:none;-moz-appearance:none;appearance:none;background:transparent;border:0px solid transparent;border-bottom:2px solid #fff;border-radius:0;box-shadow:0 1px 0 1px transparent;display:block;height:50px;margin:0;max-width:100%;padding:0.25em 0 calc(0.25em + 1px) 5px;transition:all 0.25s ease;width:100%;color:#fff;font-size:16px;line-height:105%}@media (min-width: 500px){.newsletter .hbspt-form .hs-form select{height:42px}}@media (min-width: 500px){.newsletter .hbspt-form .hs-form select{font-size:20px}}.newsletter .hbspt-form .hs-form select::-ms-expand{display:none}.newsletter .hbspt-form .hs-form select:focus{outline:0;border-bottom:2px solid #ee4c2c}.newsletter .hbspt-form .hs-form select:focus::-moz-placeholder{-moz-transition:color 0.4s ease;transition:color 0.4s ease;color:transparent}.newsletter .hbspt-form .hs-form select:focus:-ms-input-placeholder{-ms-transition:color 0.4s ease;transition:color 0.4s ease;color:transparent}.newsletter .hbspt-form .hs-form select:focus::-ms-input-placeholder{-ms-transition:color 0.4s ease;transition:color 0.4s ease;color:transparent}.newsletter .hbspt-form .hs-form select:focus::placeholder{transition:color 0.4s ease;color:transparent}.newsletter .hbspt-form .hs-form select option{font-weight:normal;color:black}.newsletter .hbspt-form .hs-form .hs-button{border-radius:5px;margin-top:20px;border:none;background-color:#ee4c2c;color:#fff;font-weight:400;padding:11px 40px;font-size:16px;font-weight:700;text-decoration:none}.newsletter .hbspt-form .hs-form .hs-input.invalid{border-bottom:2px dashed red !important}.newsletter .hbspt-form .hs-form .hs_error_rollup{display:none}.newsletter .submitted-message{display:flex;align-content:center;align-items:center;justify-content:center;border:2px solid #fff;min-height:280px;font-size:18px;padding:20px 20px 0;line-height:1.1em}@media (min-width: 500px){.newsletter .submitted-message{min-height:80px}}@media (min-width: 1000px){.newsletter .submitted-message{min-height:unset}}.newsletter .submitted-message p{max-width:none}.main-content-wrapper{margin-top:300px}@media screen and (min-width: 
768px){.main-content-wrapper{margin-top:540px;min-height:400px}}.main-content{padding-top:1.5rem;padding-bottom:1.5rem}@media screen and (min-width: 768px){.main-content{padding-top:2.625rem}}.main-content-menu{margin-bottom:1.25rem}@media screen and (min-width: 768px){.main-content-menu{margin-bottom:5rem}}.main-content-menu .navbar-nav .nav-link{color:#262626;padding-left:1.875rem;padding-right:1.875rem}@media screen and (min-width: 768px){.main-content-menu .navbar-nav .nav-link:first-of-type{padding-left:0}}article.pytorch-article{max-width:920px;margin:0 auto;padding-bottom:90px}article.pytorch-article h2,article.pytorch-article h3,article.pytorch-article h4,article.pytorch-article h5,article.pytorch-article h6{margin-top:1.875rem;margin-bottom:1.5rem;color:#262626}article.pytorch-article h2{font-size:1.5rem;letter-spacing:1.33px;line-height:2rem;margin-top:3.125rem;text-transform:uppercase}article.pytorch-article h3{font-size:1.5rem;letter-spacing:-0.25px;line-height:1.875rem;text-transform:none}article.pytorch-article h4,article.pytorch-article h5,article.pytorch-article h6{font-size:1.125rem;letter-spacing:-0.19px;line-height:1.875rem}article.pytorch-article p{margin-bottom:1.125rem}article.pytorch-article p,article.pytorch-article ul li,article.pytorch-article ol li,article.pytorch-article dl dt,article.pytorch-article dl dd,article.pytorch-article blockquote{font-size:1.125rem;line-height:1.875rem;color:#6c6c6d}article.pytorch-article table{margin-bottom:2.5rem;width:100%}article.pytorch-article table thead{border-bottom:1px solid #cacaca}article.pytorch-article table th,article.pytorch-article table tr,article.pytorch-article table td{color:#6c6c6d;font-size:1rem;letter-spacing:-0.17px}article.pytorch-article table th{padding:.625rem;color:#262626}article.pytorch-article table td{padding:.3125rem}article.pytorch-article ul,article.pytorch-article ol{margin:1.5rem 0 3.125rem 0}@media screen and (min-width: 768px){article.pytorch-article ul,article.pytorch-article ol{padding-left:6.25rem}}article.pytorch-article ul li,article.pytorch-article ol li{margin-bottom:.625rem}article.pytorch-article dl{margin-bottom:2.5rem}article.pytorch-article dl dt{margin-bottom:.75rem;font-weight:400}article.pytorch-article pre{margin-bottom:2.5rem}article.pytorch-article hr{margin-top:4.6875rem;margin-bottom:4.6875rem}article.pytorch-article blockquote{font-size:.75rem;font-style:italic;padding:15px 15px 5px 15px;width:100%;background-color:rgba(211,211,211,0.3);border-left:2px solid #000000}article.pytorch-article h3.no_toc{margin:0px}article.pytorch-article nav{float:right;display:block;overflow-y:auto;background-color:white;margin-left:20px;border-left:1px #717171}article.pytorch-article nav li{font-size:12px;line-height:20px;padding-top:0px;list-style:none}article.pytorch-article nav a{color:#717171;font-weight:bold}article.pytorch-article ul#markdown-toc{padding-left:1em;margin:0px}article.pytorch-article ul#markdown-toc ul{margin:0px;padding-left:1em}article.pytorch-article ul#markdown-toc li{margin:0px}.get-started article{margin-bottom:5rem}.get-started .quick-start-guides ul{margin-bottom:0;padding-left:0}.get-started .main-content-wrapper{margin-top:275px}@media screen and (min-width: 768px){.get-started .main-content-wrapper{margin-top:350px}}.get-started .jumbotron{height:190px}@media screen and (min-width: 768px){.get-started .jumbotron{height:260px}}.get-started .main-content .navbar{background-color:#f3f4f7;padding-left:0;padding-bottom:0;padding-top:0}@media (min-width: 
992px){.get-started .main-content .navbar li:first-of-type{padding-left:3.4375rem}.get-started .main-content .navbar .nav-item{padding:1rem;cursor:pointer}.get-started .main-content .navbar .nav-link{position:relative;top:10%;transform:translateY(-50%)}}.get-started .main-content .navbar .nav-select{background-color:#fff}.get-started .main-content .navbar .nav-select .nav-link{color:#ee4c2c;font-weight:500}.get-started .main-content .navbar .nav-link{font-size:1.125rem;color:#8c8c8c}.get-started .main-content .navbar .nav-link:hover{color:#ee4c2c}.get-started .main-content .navbar .get-started-nav-link{padding-left:1.25rem;padding-right:1.25rem}@media screen and (min-width: 768px){.get-started .main-content .navbar .get-started-nav-link{padding-left:1.875rem;padding-right:1.875rem}}.get-started .main-content .navbar .nav-item{padding-top:.9375rem;padding-bottom:.9375rem}@media screen and (min-width: 768px){.get-started .main-content .navbar .nav-item{padding-bottom:0;padding-top:2rem}}@media (min-width: 768px) and (max-width: 1239px){.get-started .main-content .navbar .nav-item{padding-bottom:0;padding-top:2rem}}@media (max-width: 990px){.get-started .main-content .navbar .nav-item{padding-bottom:.625rem;padding-top:1rem}}.get-started .main-content .navbar .navbar-toggler{margin-left:2.5rem}.get-started .main-content{padding-top:0}@media screen and (min-width: 768px){.get-started .main-content{padding-top:1.9rem}}.get-started .quick-start-module{padding-bottom:0;padding-top:0;background-color:#fff}.get-started .quick-start-module .option,.get-started .quick-start-module #command{border:2px solid #fff;background:#f3f4f7}.get-started .quick-start-module .title-block{border:2px solid #fff}.get-started .quick-start-module .selected{background-color:#ee4c2c}.get-started .quick-start-module h1{font-size:2rem;letter-spacing:1.78px;line-height:2.5rem;text-transform:uppercase;margin-bottom:1.5rem}.get-started .nav-menu-wrapper{background-color:#f3f4f7}.get-started .nav-menu-wrapper .container{padding-left:0;padding-right:0}@media screen and (min-width: 768px){.get-started .nav-menu-wrapper .container{padding-left:30px;padding-right:30px}}.get-started .navbar-nav{flex-direction:row}#installation .os{display:none}#installation .selected{display:block}#cloud .platform{display:none}#cloud .selected{display:block}.screencast{display:none}.screencast iframe{width:100% !important}.get-started .quick-starts .row.ptbuild,.get-started .quick-starts .row.os,.get-started .quick-starts .row.package,.get-started .quick-starts .row.language,.get-started .quick-starts .row.cuda{margin-bottom:1.25rem}@media screen and (min-width: 768px){.get-started .quick-starts .row.ptbuild,.get-started .quick-starts .row.os,.get-started .quick-starts .row.package,.get-started .quick-starts .row.language,.get-started .quick-starts .row.cuda{margin-bottom:0}}@media (min-width: 768px) and (max-width: 1239px){.get-started .quick-starts{flex:0 0 100%;max-width:100%}}@media screen and (min-width: 768px){.get-started .quick-starts{margin-bottom:2.5rem}.get-started .quick-starts .row{margin-bottom:0}}@media screen and (min-width: 1240px){.get-started .quick-starts{margin-bottom:0}}.get-started .get-started-locally-sidebar{padding-top:2.5rem;padding-bottom:2.5rem;top:15%;z-index:385}@media screen and (min-width: 768px){.get-started .get-started-locally-sidebar{padding-top:0;max-height:100vh;overflow:auto}}.get-started .get-started-locally-sidebar ul{padding-left:0}.get-started .get-started-locally-sidebar 
li{list-style-type:none;line-height:36px}.get-started .get-started-locally-sidebar li a{color:#8c8c8c}.get-started .get-started-locally-sidebar li a.active,.get-started .get-started-locally-sidebar li a:hover{color:#ee4c2c}.get-started .get-started-locally-sidebar li .subitem{padding-left:1.25rem}.get-started .get-started-locally-sidebar li.subitem{padding-left:1.25rem}.cloud-nav{display:none}.get-started .get-started-cloud-sidebar{padding-top:3.125rem;padding-bottom:2.5rem;top:15%}.get-started .get-started-cloud-sidebar ul{padding-left:0}.get-started .get-started-cloud-sidebar li{list-style-type:none;line-height:36px}.get-started .get-started-cloud-sidebar li a{color:#8c8c8c}.get-started .get-started-cloud-sidebar li a.active,.get-started .get-started-cloud-sidebar li a:hover{color:#ee4c2c}.get-started .get-started-cloud-sidebar li .subitem{padding-left:1.25rem}.get-started .get-started-cloud-sidebar li.subitem{padding-left:1.25rem}.pytorch-2 .article-wrapper article.pytorch-article table tr td:first-of-type{padding-left:10px}.pytorch-2 .article-wrapper article.pytorch-article table,.pytorch-2 .article-wrapper article.pytorch-article td{border:1px solid #A0A0A1;padding:10px}.pytorch-2 .article-wrapper article.pytorch-article b,.pytorch-2 .article-wrapper article.pytorch-article em,.pytorch-2 .article-wrapper article.pytorch-article h3,.pytorch-2 .article-wrapper article.pytorch-article h2,.pytorch-2 .article-wrapper article.pytorch-article p,.pytorch-2 .article-wrapper article.pytorch-article a,.pytorch-2 .article-wrapper article.pytorch-article strong,.pytorch-2 .article-wrapper article.pytorch-article td,.pytorch-2 .article-wrapper article.pytorch-article tr{font-family:Verdana}.pytorch-2 .article-wrapper article.pytorch-article ul,.pytorch-2 .article-wrapper article.pytorch-article ol{margin:1.5rem 0 1.5rem 0}.pytorch-2 .article-wrapper article.pytorch-article ul li,.pytorch-2 .article-wrapper article.pytorch-article ol li{font-family:Verdana}.pytorch-2 .article-wrapper article.pytorch-article code{font-family:IBMPlexMono,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;padding:2px;color:inherit;background-color:#f1f1f1}.pytorch-2 .article-wrapper article.pytorch-article p,.pytorch-2 .article-wrapper article.pytorch-article a{font-family:Verdana;word-break:break-word}.pytorch-2 .article-wrapper article.pytorch-article p strong,.pytorch-2 .article-wrapper article.pytorch-article a strong{font-family:Verdana}@media screen and (max-width: 418px){.pytorch-2 .article-wrapper article.pytorch-article .QnATable{max-width:95vw}}.ecosystem .jumbotron{height:170px}@media screen and (min-width: 768px){.ecosystem .jumbotron{height:300px}}.ecosystem .jumbotron h1{padding-top:8.4375rem;color:#fff}.ecosystem .jumbotron p.lead{margin-bottom:1.5625rem;padding-top:1.25rem;color:#fff}.ecosystem .jumbotron .ecosystem-join{margin-bottom:3rem}.ecosystem .jumbotron svg{margin-bottom:1.25rem}@media screen and (min-width: 768px){.ecosystem .main-content{padding-top:3.25rem}}.ecosystem .main-content-wrapper{background-color:#f3f4f7;margin-top:340px}@media screen and (min-width: 768px){.ecosystem .main-content-wrapper{margin-top:435px}}.ecosystem.ecosystem-detail .main-content-wrapper{background-color:#fff}.ecosystem-cards-wrapper{margin-bottom:1.125rem;padding-top:1.25rem}@media (min-width: 768px){.ecosystem-cards-wrapper .col-md-6{flex:0 0 100%;max-width:100%}}@media screen and (min-width: 1240px){.ecosystem-cards-wrapper .col-md-6{flex:0 0 50%;max-width:50%}}.ecosystem 
.main-content-menu .navbar-nav .nav-link{font-size:1.125rem;color:#CCCDD1;padding-right:0;margin-right:1.875rem}.ecosystem .main-content-menu .navbar-nav .nav-link.selected{color:#ee4c2c;border-bottom:1px solid #ee4c2c}@media screen and (min-width: 768px){.ecosystem .main-content-menu .nav-item:last-of-type{position:absolute;right:0}.ecosystem .main-content-menu .nav-item:last-of-type a{margin-right:0}}.ecosystem.ecosystem-detail .main-content{padding-bottom:0}.ecosystem article.pytorch-article{counter-reset:article-list}.ecosystem article.pytorch-article>ol{padding-left:0;list-style-type:none}@media screen and (min-width: 1240px){.ecosystem article.pytorch-article>ol>li{position:relative}.ecosystem article.pytorch-article>ol>li:before{counter-increment:article-list;content:counter(article-list,decimal-leading-zero);color:#B932CC;line-height:2.5rem;letter-spacing:-0.34px;font-size:2rem;font-weight:300;position:absolute;left:-60px;top:-16px;padding:.625rem 0;background-color:#fff;z-index:10}.ecosystem article.pytorch-article>ol>li:after{content:"";width:2px;position:absolute;left:-42px;top:0;height:100%;background-color:#f3f3f3;z-index:9}}.ecosystem article.pytorch-article>ol>li>h4{color:#262626}.ecosystem article.pytorch-article>ol>li ul li{list-style-type:disc}.ecosystem .quick-starts{background:#ecedf1}.ecosystem .quick-starts .title-block,.ecosystem .quick-starts #command,.ecosystem .quick-starts .option,.ecosystem .quick-starts .cloud-option{border-color:#ecedf1}.ecosystem .join-link{color:inherit;text-decoration:underline}.ecosystem .join-notice{text-align:center;padding-top:1.25rem;padding-bottom:2.5rem}.ecosystem .join-notice p{color:#6c6c6d;margin-bottom:0;line-height:1.875rem}.ecosystem .join-jumbotron{width:90%}@media screen and (min-width: 768px){.ecosystem .join-jumbotron{height:262px}}.ecosystem .join-jumbotron .container{max-width:920px}.ecosystem .join-jumbotron h1{padding-top:.3125rem;color:#fff}.ecosystem .join-jumbotron h1 span{font-weight:300}.ecosystem .join-wrapper{background-color:#f3f4f7}@media screen and (min-width: 768px){.ecosystem .join-wrapper .main-content{padding-top:1.5rem}}.ecosystem .join-wrapper .container{max-width:920px}.ecosystem .join-wrapper #success-response{color:#6c6c6d}.ecosystem .join-intro{color:#6c6c6d;line-height:28px}.ecosystem .requirements span{color:#000;font-weight:bold}.ecosystem .requirements .join-number{color:#812CE5;display:flex;align-items:center}@media screen and (min-width: 768px){.ecosystem .requirements .join-number{padding-left:.625rem}}.ecosystem .requirements p{margin-bottom:0;margin-top:-.4375rem}@media screen and (min-width: 768px){.ecosystem .requirements p{padding-left:1.5rem}}@media screen and (min-width: 768px){.ecosystem .requirements .col-md-11{border-left:2px solid #f3f4f7}}.ecosystem .row.requirements{padding-bottom:2.5rem}.ecosystem .experimental .ecosystem-card-title-container{display:inline-flex}.ecosystem .experimental .ecosystem-card-title-container .experimental-badge{text-transform:uppercase;margin-left:15px;background-color:#e4e4e4;color:#262626;opacity:0.75;font-size:.625rem;letter-spacing:1px;line-height:1.375rem;height:1.25rem;width:6rem;text-align:center;margin-top:.25rem}.ecosystem .ecosystem-card-title-container .card-title{padding-left:0;font-size:1.5rem;color:#262626}.ecosystem .star-list{list-style:none;padding-left:0}.ecosystem .star-list li{display:inline}.ecosystem .star-list li.github-stars-count-whole-number{display:none}.ecosystem 
.icon-count-container{display:inline-block;vertical-align:text-bottom;margin-left:.5rem}.ecosystem .github-logo{height:15px;width:13px;margin-left:10px}.ecosystem .github-stars-count{color:#797676;position:relative;top:.25rem;font-size:14px;margin-left:0.125rem}@media screen and (min-width: 768px){.ecosystem .github-stars-count{top:.1875rem;font-size:initial}}.ecosystem-divider{position:relative;margin-bottom:4rem;margin-top:1.5rem;top:3rem}.ecosystem #dropdownSort,.ecosystem #dropdownSortLeft{margin-left:0}.ecosystem #dropdownSortLeft{font-size:19px;top:inherit;right:inherit}.ecosystem-filter-menu ul{list-style-type:none;padding-left:1.25rem}.ecosystem-filter-menu ul li{padding-right:1.25rem;word-break:break-all}.ecosystem-filter-menu ul li a{color:#797676}.ecosystem-filter-menu ul li a:hover{color:#ee4c2c}.ecosystem .ecosystem-filter{cursor:pointer}.ecosystem .ecosystem-filter ul{list-style-type:none}.ecosystem #dropdownFilter,#dropdownSort,#dropdownSortLeft{color:#797676;cursor:pointer;z-index:1;position:absolute}.ecosystem .pagination .page{border:1px solid #dee2e6;padding:0.5rem 0.75rem}.ecosystem .pagination .active .page{background-color:#dee2e6}.ecosystem-form .hbspt-form{padding-bottom:3rem}.ecosystem-form .hbspt-form .hs-form-field{width:100%}.ecosystem-form .hbspt-form .hs-form-field .input input{width:100%;border:none;border-bottom:2px solid #812CE5;height:2.75rem;outline:none;padding-left:.9375rem;margin-bottom:1.875rem}.ecosystem-form .hbspt-form .hs-richtext h3{text-transform:uppercase;padding-top:1.5625rem;padding-bottom:1.875rem}.ecosystem-form .hbspt-form label{color:#6c6c6d}.ecosystem-form .hbspt-form textarea{width:100%;border:none;border-bottom:2px solid #812CE5;outline:none;padding-left:.9375rem;margin-bottom:1.875rem;height:5.625rem;padding-top:.625rem}.ecosystem-form .hbspt-form ::-moz-placeholder{color:#6c6c6d;opacity:0.5}.ecosystem-form .hbspt-form :-ms-input-placeholder{color:#6c6c6d;opacity:0.5}.ecosystem-form .hbspt-form ::-ms-input-placeholder{color:#6c6c6d;opacity:0.5}.ecosystem-form .hbspt-form ::placeholder{color:#6c6c6d;opacity:0.5}.ecosystem-form .hbspt-form .actions{display:flex;width:100%;justify-content:center}.ecosystem-form .hbspt-form .hs-button{padding-left:.75rem;margin-top:2.5rem;background-color:#ee4c2c;color:#fff;cursor:pointer;border:none;width:30%;height:2.8125rem;text-align:left;background-repeat:no-repeat;background-image:url(/assets/images/arrow-right-with-tail-white.svg);background-size:30px 12px;background-position:right}@media screen and (min-width: 768px){.ecosystem-form .hbspt-form .hs-button{padding-left:1.125rem;background-origin:content-box;background-size:30px 15px}}.features .main-content{padding-bottom:0}.features .navbar-nav .nav-link{color:#000}.features .nav-logo{background-image:url("/assets/images/logo-dark.svg")}@media screen and (min-width: 768px){.features .main-background{height:575px}}.features .main-content-wrapper{margin-top:350px}@media screen and (min-width: 768px){.features .main-content-wrapper{margin-top:540px}}.features-row{padding-bottom:3.75rem;align-items:center}.features-row:first-of-type{margin-top:1.25rem}.features-row:last-of-type{padding-bottom:4.5rem}@media screen and (min-width: 768px){.features-row{padding-bottom:6rem}.features-row:first-of-type{margin-top:4.05rem}}.features-row h3{font-size:2rem;letter-spacing:1.78px;line-height:2.25rem;font-weight:400;text-transform:uppercase;margin-bottom:1.25rem;font-weight:300}@media (min-width: 768px) and (max-width: 1239px){.features-row h3{width:80%}}@media 
screen and (min-width: 1240px){.features-row h3{width:590px}}.features-row p{font-size:1.125rem;letter-spacing:0.25px;line-height:1.75rem;color:#6c6c6d;padding-right:1.875rem}@media (min-width: 768px) and (max-width: 1239px){.features-row p{width:80%}}@media screen and (min-width: 1240px){.features-row p{width:590px}}.features-row .feature-content-holder{width:100%}@media screen and (min-width: 1240px){.features-row .feature-content-holder{width:495px}}.features-row .feature-content-holder pre.highlight{margin-bottom:0}.features-row:nth-child(odd) .col-md-6:nth-child(1n){order:2}.features-row:nth-child(odd) .col-md-6:nth-child(2n){order:1}@media screen and (min-width: 768px){.features-row:nth-child(odd) .col-md-6:nth-child(1n){order:1}.features-row:nth-child(odd) .col-md-6:nth-child(2n){order:2}}.features-row:nth-child(1n) h3{color:#B73BC9}.features-row:nth-child(1n) .feature-content-holder{border-bottom:2px solid #B73BC9}.features-row:nth-child(2n) h3{color:#D92F4C}.features-row:nth-child(2n) .feature-content-holder{border-bottom:2px solid #D92F4C}.features-row:nth-child(3n) h3{color:#8038E0}.features-row:nth-child(3n) .feature-content-holder{border-bottom:2px solid #8038E0}@media screen and (min-width: 1240px){.features-row .col-md-6{padding-left:0;padding-right:0}}@media screen and (min-width: 768px){.features-row .col-md-6:nth-of-type(2) .feature-content{width:100%}.features-row .col-md-6:nth-of-type(2) .feature-content h3,.features-row .col-md-6:nth-of-type(2) .feature-content p,.features-row .col-md-6:nth-of-type(2) .feature-content .feature-content-holder{float:right}}.features .jumbotron{height:200px}@media screen and (min-width: 768px){.features .jumbotron{height:195px}}@media (max-width: 320px){.features .jumbotron{height:250px}}.features .jumbotron h1{padding-top:1.875rem}@media screen and (min-width: 768px){.features .jumbotron{height:468px}.features .jumbotron h1{padding-top:0}}.features .jumbotron h1,.features .jumbotron p{color:#fff}@media screen and (min-width: 768px){.features .jumbotron .btn{margin-top:.375rem}}.resources .jumbotron{align-items:flex-end;color:#fff;height:220px}@media screen and (min-width: 768px){.resources .jumbotron{height:300px}}.resources .jumbotron h1{padding-top:8.4375rem}.resources .jumbotron p.lead{margin-bottom:1.5625rem;padding-top:1.25rem}.resources .main-content-wrapper{margin-top:385px;margin-bottom:0.75rem}@media screen and (min-width: 768px){.resources .main-content-wrapper{margin-top:475px}}@media screen and (min-width: 768px){.resources .resource-card{margin-bottom:2.25rem}}.quick-starts{background:#f3f4f7}.quick-starts .col-md-2-4{position:relative;width:100%;min-height:1px;padding-right:15px;padding-left:15px}@media (min-width: 768px){.quick-starts .col-md-2-4{flex:0 0 20%;max-width:20%}}.quick-starts .start-locally-col{margin-bottom:1.25rem}.quick-starts .start-locally-col .row.ptbuild,.quick-starts .start-locally-col .row.os,.quick-starts .start-locally-col .row.package,.quick-starts .start-locally-col .row.language,.quick-starts .start-locally-col .row.cuda{margin-bottom:1.25rem}@media screen and (min-width: 768px){.quick-starts .start-locally-col .row.ptbuild,.quick-starts .start-locally-col .row.os,.quick-starts .start-locally-col .row.package,.quick-starts .start-locally-col .row.language,.quick-starts .start-locally-col .row.cuda{margin-bottom:0}}@media (min-width: 768px) and (max-width: 1239px){.quick-starts .start-locally-col{flex:0 0 100%;max-width:100%}}@media screen and (min-width: 768px){.quick-starts 
.start-locally-col{margin-bottom:2.5rem}.quick-starts .start-locally-col .row{margin-bottom:0}}@media screen and (min-width: 1240px){.quick-starts .start-locally-col{margin-bottom:0}}.quick-starts .start-locally-col pre{font-size:80% !important;background-color:#ffffff !important}.quick-starts .start-locally-col .prev-versions-btn{margin-top:30px}@media (min-width: 768px) and (max-width: 1239px){.quick-starts .cloud-options-col{flex:0 0 100%;max-width:100%;margin-left:0;margin-top:1.25rem}}.quick-starts p{font-size:1.125rem;line-height:1.75rem}.quick-starts .card-body{flex:1 1 auto}.quick-starts .cloud-option-image{margin-left:.9375rem;margin-right:1.5625rem;margin-bottom:.3125rem}.quick-starts .cloud-option-row{margin-left:0;cursor:pointer}.quick-starts .option{border:2px solid #f3f4f7;font-size:1rem;color:#6c6c6d;letter-spacing:-0.22px;line-height:1.25rem;background:#fff;cursor:pointer}.quick-starts .option:hover{background-color:#ee4c2c;color:#fff}.quick-starts .selected{background-color:#ee4c2c;color:#fff}.quick-starts .block{margin-bottom:.0625rem;height:2.5rem;display:flex;align-items:center}.quick-starts .title-block{margin:.0625rem;height:2.5rem;border:2px solid #f3f4f7;font-size:1rem;color:#6c6c6d;line-height:1.25rem;display:flex;align-items:center}.quick-starts .title-block:before{display:block;content:".";color:transparent;border-left:2px solid #CCCDD1;height:100%;position:absolute;left:0}.quick-starts #command{color:#4a4a4a;background-color:#fff;padding:.9375rem;border:2px solid #f3f4f7;word-wrap:break-word;display:table-cell;vertical-align:middle}.quick-starts #command a{font-size:125%}@media screen and (min-width: 768px){.quick-starts #command a:hover{color:#ee4c2c}}.quick-starts #command pre{word-break:break-all;white-space:normal}.quick-starts .command-container{display:table;width:100%}@media screen and (min-width: 768px){.quick-starts .command-container{min-height:5.25rem}}.quick-starts .command-container pre{margin-bottom:0px;padding:0px;font-size:75%;background-color:#f3f4f7}.quick-starts .command-block{height:5.25rem;word-wrap:break-word;color:#6c6c6d}.quick-starts .command-block:before{border-left:2px solid #000}.quick-starts .quick-start-link{color:#6c6c6d}.quick-starts .mobile-heading{display:flex;align-items:center;font-weight:400}@media screen and (min-width: 768px){.quick-starts .mobile-heading{display:none}}.quick-starts .command-mobile-heading{display:flex;align-items:center;font-weight:400;color:#000}@media screen and (min-width: 768px){.quick-starts .command-mobile-heading{display:none}}.quick-starts .headings{display:none}@media screen and (min-width: 768px){.quick-starts .headings{display:block}}.quick-starts .cloud-options-col{margin-top:1.25rem}@media screen and (min-width: 768px){.quick-starts .cloud-options-col{margin-top:0}}@media (max-width: 978px){.quick-starts .os-text{margin-top:0}}.quick-start-guides{font-size:1.125rem;letter-spacing:0.25px;line-height:2.25rem;color:#CCCDD1}.quick-start-guides .select-instructions{color:#262626;border-bottom:2px solid #CCCDD1;margin-bottom:1rem;display:inline-block}@media screen and (min-width: 768px){.quick-start-guides .select-instructions{margin-bottom:0}}.quick-start-module{padding-top:2.5rem;padding-bottom:2.5rem}.quick-start-module .option-module{float:right}@media screen and (min-width: 768px){.quick-start-module{padding-top:4rem;padding-bottom:4.125rem}}.quick-start-module p{color:#6c6c6d;font-size:1.125em;letter-spacing:0.25px;padding-bottom:.9375rem;margin-bottom:1.4rem}.quick-start-module 
h3{font-size:1.5rem;letter-spacing:1.33px;line-height:2rem;text-transform:uppercase;margin-bottom:2.1rem}.quick-starts .cloud-option-body{display:flex;align-items:center;height:64px;padding:0 0 0 5rem;position:relative;background-image:url("/assets/images/chevron-right-orange.svg");background-size:6px 13px;background-position:center right 15px;background-repeat:no-repeat}@media screen and (min-width: 768px){.quick-starts .cloud-option-body:after{content:"";display:block;width:0;height:1px;position:absolute;bottom:0;left:0;background-color:#ee4c2c;transition:width .250s ease-in-out}.quick-starts .cloud-option-body:hover:after{width:100%}.quick-starts .cloud-option-body:hover{color:#262626}}@media screen and (min-width: 768px){.quick-starts .cloud-option-body{padding-right:2rem}}@media (min-width: 768px) and (max-width: 1239px){.quick-starts .cloud-option-body{padding-right:1.25rem}}@media screen and (min-width: 768px){.quick-starts .cloud-option-body{background-size:8px 14px}}.quick-starts .cloud-option-body:before{opacity:0.5;position:absolute;left:1.875rem;top:21px}.quick-starts .cloud-option-body.aws:before{content:url("/assets/images/aws-logo.svg")}.quick-starts .cloud-option-body.microsoft-azure:before{content:url("/assets/images/microsoft-azure-logo.svg")}.quick-starts .cloud-option-body.lightning-studios:before{content:url("/assets/images/lightning-studios-logo.svg")}.quick-starts .cloud-option-body.google-cloud:before{content:url("/assets/images/google-cloud-logo.svg")}.quick-starts .cloud-option-body.colab:before{content:url("/assets/images/colab-logo.svg")}@media screen and (min-width: 768px){.quick-starts .cloud-option-body:hover:before{opacity:1}}.quick-starts .cloud-option{background-color:#fff;margin-bottom:.125rem;border:2px solid #f3f4f7;font-size:1.125rem;letter-spacing:-0.25px;line-height:1.875rem;color:#262626}.quick-starts .cloud-option #microsoft-azure p{color:#262626;margin:0;padding:0;font-size:inherit;line-height:1.3rem}.quick-starts .cloud-option #microsoft-azure span{margin-bottom:0;padding-bottom:0;color:#ee4c2c;padding:0px 35px 0px 8px;font-style:italic;line-height:1.3rem}@media (min-width: 768px) and (max-width: 1239px){.quick-starts .cloud-option{font-size:1rem}}.quick-starts .cloud-option ul{display:none;width:100%;margin:0 0 1.25rem 0;padding:0}.quick-starts .cloud-option ul li{margin-top:0;position:relative;padding-left:5rem}@media (min-width: 768px) and (max-width: 1239px){.quick-starts .cloud-option ul li{font-size:1rem}}.quick-starts .cloud-option ul li a{color:#6c6c6d;letter-spacing:-0.25px;line-height:30px}@media screen and (min-width: 768px){.quick-starts .cloud-option ul li a:hover{color:#ee4c2c}}@media screen and (min-width: 768px){.quick-starts .cloud-option ul li:hover:before{content:"\2022";color:#ee4c2c;position:absolute;left:36px}}.quick-starts .cloud-option ul li:first-of-type{margin-top:1.25rem}.quick-starts .cloud-option.open .cloud-option-body{background-image:url("/assets/images/chevron-down-orange.svg");background-size:14px 14px;border-bottom:1px solid #ee4c2c;color:#262626}@media screen and (min-width: 768px){.quick-starts .cloud-option.open .cloud-option-body{border-bottom:none}}.quick-starts .cloud-option.open .cloud-option-body:after{width:100%}.quick-starts .cloud-option.open .cloud-option-body:before{opacity:1}.quick-starts .cloud-option.open ul{display:block}.blog .navbar-nav .nav-link{color:#000}.blog .main-content{padding-bottom:1.5rem}@media screen and (min-width: 768px){.blog 
.main-content{padding-top:1.70rem;padding-bottom:3.5rem}}.blog .main-background{height:290px}@media screen and (min-width: 768px){.blog .main-background{height:485px}}.blog .blog-detail-background{height:300px}@media screen and (min-width: 768px){.blog .blog-detail-background{height:312px}}.blog .main-content-menu .navbar-nav .nav-link{text-transform:capitalize}.blog .main-content-menu .navbar-nav .nav-link.selected{color:#ee4c2c !important;text-decoration:underline;-webkit-text-decoration-color:#ee4c2c;text-decoration-color:#ee4c2c;opacity:0.75 !important}@media screen and (min-width: 768px){.blog .main-content-menu .nav-item:last-of-type{position:absolute;right:0}.blog .main-content-menu .nav-item:last-of-type a{margin-right:0}}.blog .zoom-in{cursor:zoom-in}.blog .zoomed{cursor:zoom-out}.blog .zoomed img{margin:auto !important;position:absolute;top:0;left:0;right:0;bottom:0;max-width:98%}.blog .nav-logo{background-image:url("/assets/images/logo-dark.svg")}.blog .main-content-wrapper{margin-top:275px}.blog .main-content-wrapper .row.blog-index{margin-top:30px}.blog .main-content-wrapper .row.blog-index p{color:#6c6c6d}.blog .main-content-wrapper .row.blog-vertical{display:block;max-width:100%;margin:auto}.blog .main-content-wrapper .row.blog-vertical .col-md-4{display:initial}.blog .main-content-wrapper .row.blog-vertical .btn{float:left}.blog .main-content-wrapper .vertical-blog-container{border-bottom:1px solid #E2E2E2;padding-bottom:3rem}.blog .main-content-wrapper .vertical-blog-container:last-of-type{margin-bottom:2rem}@media screen and (min-width: 768px){.blog .main-content-wrapper{margin-top:470px}.blog .main-content-wrapper .row.blog-index [class*="col-"]:not(:first-child):not(:last-child):not(:nth-child(3n)){padding-right:2.1875rem;padding-left:2.1875rem}.blog .main-content-wrapper .row.blog-index [class*="col-"]:nth-child(3n){padding-left:2.1875rem}.blog .main-content-wrapper .row.blog-index [class*="col-"]:nth-child(3n+1){padding-right:2.1875rem}.blog .main-content-wrapper .col-md-4{margin-bottom:1.4375rem}}.blog .main-content-wrapper h4 a{font-family:FreightSans;font-size:1.5rem;color:#000;letter-spacing:0;line-height:2rem;font-weight:400}.blog .main-content-wrapper .author{color:#ee4c2c;font-size:1.25rem;letter-spacing:0.25px;line-height:1.875rem;margin-bottom:1.875rem}.blog .main-content-wrapper .author-icon{position:relative;top:1.625rem;height:1.0625rem;width:1.1875rem}.blog .blog-detail-content{padding-bottom:2.8rem}@media screen and (min-width: 768px){.blog .blog-detail-wrapper{margin-top:324px}}.blog .jumbotron{top:6.5625rem}@media screen and (min-width: 768px){.blog .jumbotron{height:25.3125rem}}@media screen and (min-width: 768px){.blog .jumbotron .container{padding-bottom:2.8125rem}}.blog .jumbotron .blog-index-title{overflow:hidden;margin-top:1.5rem;white-space:nowrap;text-overflow:ellipsis;color:white}@media screen and (min-width: 768px){.blog .jumbotron .blog-index-title{overflow:unset;white-space:unset;text-overflow:unset}}.blog .jumbotron h1{letter-spacing:-1.65px;font-size:3.25rem;line-height:3.5rem;text-transform:none;color:#fff}.blog .jumbotron h1 a{color:#fff;word-wrap:break-word}.blog .jumbotron h2{color:#fff}.blog .jumbotron .blog-title{display:inline-flex}.blog .jumbotron .blog-title:hover{color:#fff}.blog .jumbotron .blog-detail-container{padding-top:4rem}@media screen and (min-width: 768px){.blog .jumbotron .blog-detail-container{padding-top:10.875rem}}.blog .jumbotron p{font-size:1.25rem;letter-spacing:0;line-height:1.875rem;color:#fff}.blog .jumbotron 
.btn{margin-top:.75rem;padding-top:.5625rem}.blog .jumbotron .blog-page-container p.blog-date{padding-top:.625rem}.blog .jumbotron .blog-page-container .btn{margin-bottom:.625rem}.blog .blog-detail-jumbotron{top:45px}@media screen and (min-width: 768px){.blog .blog-detail-jumbotron{height:107px;top:75px}}.blog p.blog-date{font-size:1.125rem;letter-spacing:0;line-height:1.5rem;margin-bottom:.625rem;color:#6c6c6d}.blog p.featured-post{font-size:1.125rem;letter-spacing:0;line-height:1.5rem;margin-bottom:.625rem;color:#fff}.blog p.featured-blog-preview{margin-bottom:.75rem}.blog #blogPostFilter .nav-link{opacity:0.53;font-size:1.25rem;color:#000;letter-spacing:0;line-height:2.125rem}.blog .page-link{font-size:1.25rem;letter-spacing:0;line-height:2.125rem;color:#ee4c2c;width:7.5rem;text-align:center}.blog .blog-modal{max-width:75%;top:5rem}.blog .blog-modal:hover{cursor:zoom-out}@media (max-width: 575px){.blog .blog-modal{max-width:100%;top:10rem}}.blog .blog-image{cursor:zoom-in}@media (max-width: 1067px){.blog .jumbotron h1{margin-right:0;margin-top:1.5rem}.blog .jumbotron h1 a{font-size:2.8125rem;line-height:2.5rem}.blog .main-content-wrapper .col-md-4{margin-bottom:4.6875rem}.blog .similar-posts{margin-bottom:3.125rem}}@media (max-width: 1050px){.blog .main-content-wrapper .author-icon{left:-1.875rem}}.blog table tr th{font-weight:600}.blog .pytorch-article .enterprise-azure-logo-container{padding-left:0}.blog .pytorch-article .enterprise-azure-logo-container img{margin-bottom:0}.blog .pytorch-article img{margin-bottom:1.125rem}twitterwidget{margin:0 auto;margin-top:1.125rem !important;margin-bottom:1.125rem !important}.pytorch-article .outlined-code-block{border:1px solid black;padding:1rem;margin-bottom:1rem}.pytorch-article .outlined-code-block pre{margin:0;padding:0;background-color:white}.pytorch-article .reference-list li{overflow-wrap:anywhere}.similar-posts-module{background:#f3f4f7}.similar-posts-module p.blog-date{font-size:1.125rem;color:#CCCDD1;letter-spacing:0;line-height:1.5rem}.similar-posts-module h4 a{font-family:FreightSans;font-size:1.5rem;color:#000;letter-spacing:0;line-height:2rem;font-weight:400}.similar-posts-module .module-content{margin-bottom:2.1875rem}.similar-posts-module .module-content .navbar-nav{margin-top:3.75rem}.similar-posts-module .module-content .module-heading{text-transform:uppercase;color:#000;font-size:1.5rem;letter-spacing:.083125rem;line-height:2rem;font-weight:400}@media screen and (min-width: 768px){.similar-posts-module .module-content .nav-item:last-of-type{position:absolute;right:0}.similar-posts-module .module-content .nav-item:last-of-type a{margin-right:0}}.similar-posts-module .see-more-posts{color:#000;font-size:1.125rem;letter-spacing:-0.25px;line-height:1.875rem;top:.125rem}input[type='search']{-moz-appearance:none;-webkit-appearance:none}.navSearchWrapper{align-items:center;align-self:center;display:flex;justify-content:center;position:relative;right:10px;top:15px;margin-left:0;padding-bottom:20px}@media screen and (min-width: 768px){.navSearchWrapper{position:absolute;margin-left:30px;display:block;padding-left:3px;padding-bottom:0}}.tabletSearchWrapper{top:0px}@media (min-width: 768px) and (max-width: 1239px){.tabletSearchWrapper{padding-bottom:20px;position:relative;margin-left:0}}.navSearchWrapper .aa-dropdown-menu{background:#f9f9f9;border:3px solid rgba(57,57,57,0.25);color:#393939;font-size:.875rem;left:auto !important;line-height:1.2em;right:0 !important}.navSearchWrapper .aa-dropdown-menu 
.algolia-docsearch-suggestion--category-header{background:#000;color:white;font-size:.875rem;font-weight:400}.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion--category-header .algolia-docsearch-suggestion--highlight{background-color:#000;color:#fff}.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion--title .algolia-docsearch-suggestion--highlight,.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion--subcategory-column .algolia-docsearch-suggestion--highlight{color:#000}.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion__secondary,.navSearchWrapper .aa-dropdown-menu .algolia-docsearch-suggestion--subcategory-column{border-color:rgba(57,57,57,0.3)}@media screen and (min-width: 768px){.navSearchWrapper .algolia-autocomplete .algolia-docsearch-suggestion--subcategory-column{word-wrap:normal}}input#search-input{background-color:inherit;border:none;border-radius:20px;color:#000;font-size:1.125rem;font-weight:300;line-height:20px;outline:none;padding-left:25px;position:relative;transition:0.5s width ease;display:none;width:220px;background-image:url("/assets/images/search-icon.svg");background-size:12px 15px;background-repeat:no-repeat;background-position:8px 5px}input#search-input:hover{background-image:url("/assets/images/search-icon-orange.svg")}input#mobile-search-input{font-size:2rem;background-color:transparent;color:#fff;border:none;outline:none;padding-left:25px;position:relative;border-top-left-radius:20px;border-bottom-left-radius:20px;width:300px;display:block}input#search-input:focus,input#search-input:active{color:#000}.navigationSlider .slidingNav .navSearchWrapper .algolia-docsearch-footer a{height:auto}@media only screen and (max-width: 735px){.navSearchWrapper{width:100%}}input::-moz-placeholder{color:#e5e5e5}input:-ms-input-placeholder{color:#e5e5e5}input::-ms-input-placeholder{color:#e5e5e5}input::placeholder{color:#e5e5e5}.hljs{padding:1.25rem 1.5rem}@media only screen and (max-width: 1024px){.reactNavSearchWrapper input#search-input{background-color:rgba(242,196,178,0.25);border:none;border-radius:20px;box-sizing:border-box;color:#393939;font-size:.875rem;line-height:20px;outline:none;padding-left:25px;position:relative;transition:background-color 0.2s cubic-bezier(0.68, -0.55, 0.265, 1.55),width 0.2s cubic-bezier(0.68, -0.55, 0.265, 1.55),color 0.2s ease;width:100%}.reactNavSearchWrapper input#search-input:focus,.reactNavSearchWrapper input#search-input:active{background-color:#000;color:#fff}.reactNavSearchWrapper .algolia-docsearch-suggestion--subcategory-inline{display:none}.reactNavSearchWrapper>span{width:100%}.reactNavSearchWrapper .aa-dropdown-menu{font-size:.75rem;line-height:2em;padding:0;border-width:1px;min-width:500px}.reactNavSearchWrapper .algolia-docsearch-suggestion__secondary{border-top:none}.aa-suggestions{min-height:140px;max-height:60vh;-webkit-overflow-scrolling:touch;overflow-y:scroll}}@media only screen and (min-width: 1024px){.navSearchWrapper{padding-left:10px;position:relative;right:auto;top:auto}}@media only screen and (min-width: 1024px) and (min-width: 768px){.navSearchWrapper{padding-left:3px;right:10px;margin-left:0}}@media only screen and (min-width: 1024px){.navSearchWrapper .algolia-autocomplete{display:block}.tabletSearchWrapper{right:10px}}@media only screen and (max-width: 735px){.reactNavSearchWrapper .aa-dropdown-menu{min-width:400px}}@media only screen and (max-width: 475px){.reactNavSearchWrapper 
.aa-dropdown-menu{min-width:300px}}.search-border{display:none;flex-direction:row;border:none;background-color:transparent;border-radius:20px;width:100%;float:right}@media screen and (min-width: 768px){.search-border{display:flex}}.mobile-search-border{flex-direction:row;border:none;background-color:rgba(255,255,255,0.1);border-radius:20px;width:100%;float:right;display:flex}@media (min-width: 768px) and (max-width: 1239px){.mobile-search-border{border-radius:25px}}#close-search{color:#ee4c2c;padding-right:10px;font-size:.99em;display:none;cursor:pointer}.active-header{margin-top:-1px}.active-search-icon{background-image:url("/assets/images/search-icon-orange.svg") !important;display:inline-block !important}.active-background{background-color:#f3f4f7;width:50%;padding:4px}.homepage-header input#search-input{background-image:url("/assets/images/search-icon-white.svg");color:#fff}.homepage-header input#search-input:focus,.homepage-header input#search-input:active{color:#fff}.homepage-header .active-background{background-color:#88888833}.homepage-header #close-search{color:#fff;opacity:0.5}.homepage-header #close-search:hover{color:#ee4c2c}.homepage-header #search-icon{background-image:url(/assets/images/search-icon-white.svg)}.homepage-header #search-icon:hover{background-color:#88888833}#search-icon{background-image:url(/assets/images/search-icon.svg);color:transparent;width:33px;height:33px;background-size:21px 21px;background-repeat:no-repeat;background-position:6px 5px;border-radius:25px;cursor:pointer}#search-icon:hover{background-color:#f3f4f7}#mobile-search-icon{background-image:url(/assets/images/search-icon-white.svg);width:30px;height:38px;background-size:16px 28px;background-repeat:no-repeat;background-position:0px 5px;cursor:pointer;border-top-right-radius:20px;border-bottom-right-radius:20px}@media (min-width: 768px) and (max-width: 1239px){#mobile-search-icon{height:50px;width:35px;background-size:20px 42px}}.navSearchWrapper .algolia-autocomplete .ds-dropdown-menu{min-width:330px;height:500px;overflow-y:scroll}@media screen and (min-width: 768px){.navSearchWrapper .algolia-autocomplete .ds-dropdown-menu{height:auto;min-width:700px;overflow-y:hidden}}@media (min-width: 768px) and (max-width: 1239px){.navSearchWrapper .algolia-autocomplete .ds-dropdown-menu{height:700px;overflow-y:scroll}}@media (min-width: 769px) and (max-width: 1024px){.navSearchWrapper .algolia-autocomplete .ds-dropdown-menu{min-width:950px}}.cookie-banner-wrapper{display:none}.cookie-banner-wrapper.is-visible{display:block;position:fixed;bottom:0;background-color:#f3f4f7;min-height:100px;width:100%;z-index:401;border-top:3px solid #ededee}.cookie-banner-wrapper .gdpr-notice{color:#6c6c6d;margin-top:1.5625rem;text-align:left;max-width:1440px}@media screen and (min-width: 768px){.cookie-banner-wrapper .gdpr-notice{width:77%}}@media (min-width: 768px) and (max-width: 1239px){.cookie-banner-wrapper .gdpr-notice{width:inherit}}.cookie-banner-wrapper .gdpr-notice .cookie-policy-link{color:#343434}.cookie-banner-wrapper .close-button{-webkit-appearance:none;-moz-appearance:none;appearance:none;background:transparent;border:1px solid #f3f4f7;height:1.3125rem;position:absolute;bottom:42px;right:0;top:0;cursor:pointer;outline:none}@media screen and (min-width: 768px){.cookie-banner-wrapper .close-button{right:20%;top:inherit}}@media (min-width: 768px) and (max-width: 1239px){.cookie-banner-wrapper .close-button{right:0;top:0}}.hub .jumbotron{height:300px}@media screen and (min-width: 768px){.hub 
.jumbotron{height:420px}}.hub .jumbotron h1{color:#fff}.hub .jumbotron h1 #hub-header,.hub .jumbotron h1 #hub-sub-header{font-weight:lighter}.hub .jumbotron p.lead,.hub .jumbotron p.hub-release-message{margin-bottom:1.5625rem;padding-top:1.5625rem;color:#fff}@media screen and (min-width: 768px){.hub .jumbotron p.lead,.hub .jumbotron p.hub-release-message{width:77%}}.hub .jumbotron p.hub-release-message{padding-top:0;font-style:italic}.hub .jumbotron svg{margin-bottom:1.25rem}.hub .jumbotron p.detail-lead{padding-top:3.125rem;color:#797676;width:100%;margin-bottom:0px}.hub .jumbotron p.lead-summary{color:#6c6c6d}.hub.hub-index .jumbotron{height:280px}@media screen and (min-width: 768px){.hub.hub-index .jumbotron{height:325px}}.hub .detail-github-link{background:#ee4c2c;color:#fff}.hub .detail-colab-link{background:#ffc107;color:#000}.hub .detail-web-demo-link{background:#4a9fb5;color:#fff}.hub .detail-colab-link,.hub .detail-github-link,.hub .detail-web-demo-link{margin-top:1rem}.hub .detail-button-container{margin-top:2.8125rem}@media (min-width: 768px) and (max-width: 1239px){.hub .detail-button-container{margin-top:1.25rem}}@media (max-width: 320px){.hub .detail-button-container{margin-top:1.25rem}}@media (max-width: 360px){.hub .detail-button-container{margin-top:1.25rem}}.hub a .detail-colab-link,.hub a .detail-github-link{padding-right:3.125rem}.hub .detail-arrow{color:#ee4c2c;font-size:2.5rem}@media screen and (min-width: 768px){.hub .detail-arrow{font-size:4.5rem}}.hub .with-right-white-arrow{padding-right:2rem;position:relative;background-image:url("/assets/images/chevron-right-white.svg");background-size:6px 13px;background-position:top 10px right 11px;background-repeat:no-repeat}@media screen and (min-width: 768px){.hub .with-right-white-arrow{background-size:8px 14px;background-position:top 15px right 12px;padding-right:2rem}}.hub .main-content{padding-top:8.75rem}@media screen and (min-width: 768px){.hub .main-content{padding-top:8.4375rem}}@media (max-width: 320px){.hub .main-content{padding-top:10rem}}.hub.hub-detail .main-content{padding-top:12.5rem}@media screen and (min-width: 768px){.hub.hub-detail .main-content{padding-top:9.375rem}}.hub.hub-detail .jumbotron{height:350px}@media screen and (min-width: 768px){.hub.hub-detail .jumbotron{height:400px}}.hub .main-content-wrapper{background-color:#f3f4f7;margin-top:300px}@media screen and (min-width: 768px){.hub .main-content-wrapper{margin-top:395px}}.hub-feedback-button{border:2px solid #e2e2e2;color:#A0A0A1;padding-left:0;padding-right:5rem;font-size:1rem;width:13rem}.hub-feedback-button:after{bottom:-1px}.hub-flag{background-image:url("/assets/images/feedback-flag.svg");background-size:15px 20px;background-position:center right 10px;background-repeat:no-repeat}#hub-icons{height:2rem}@media (max-width: 480px){#hub-icons{position:initial;padding-left:0;padding-top:1rem}}.hub.hub-detail .main-content-wrapper{margin-top:305px}@media screen and (min-width: 768px){.hub.hub-detail .main-content-wrapper{margin-top:390px}}@media (min-width: 768px) and (max-width: 1239px){.hub.hub-detail .main-content-wrapper{margin-top:490px}}@media (max-width: 320px){.hub.hub-detail .main-content-wrapper{margin-top:330px}}.hub .hub-cards-wrapper,.hub-cards-wrapper-right{margin-bottom:1.125rem;padding-top:1.25rem}.hub .hub-cards-wrapper .card-body .card-summary,.hub-cards-wrapper-right .card-body .card-summary{width:75%}.hub .hub-cards-wrapper .card-body .hub-image,.hub-cards-wrapper-right .card-body 
.hub-image{position:absolute;top:0px;right:0px;height:100%;width:25%}.hub .hub-cards-wrapper .card-body .hub-image img,.hub-cards-wrapper-right .card-body .hub-image img{height:100%;width:100%}.hub .hub-cards-wrapper .card-body .hub-image:before,.hub-cards-wrapper-right .card-body .hub-image:before{content:'';position:absolute;top:0;left:0;bottom:0;right:0;z-index:1;background:#000000;opacity:.075}.hub .github-stars-count{color:#797676;position:relative;top:.25rem;font-size:14px}@media screen and (min-width: 768px){.hub .github-stars-count{top:.1875rem;font-size:initial}}.hub .github-stars-count-whole-number{display:none}.hub .github-logo{height:15px;width:13px}.hub .icon-count-container{display:inline-block;vertical-align:text-bottom;margin-left:.5rem}.hub .detail-count{font-size:1.25rem}.hub .main-stars-container{display:flex}.hub .detail-stars-container{display:inline-flex}.hub .detail-stars-container .github-stars-image{margin-left:0}.hub .card-body .hub-card-title-container{width:75%;display:inline-flex;max-width:18.75rem}.hub .card-body .hub-card-title-container .experimental-badge{text-transform:uppercase;margin-left:.9375rem;background-color:#e4e4e4;color:#262626;opacity:0.75;font-size:.625rem;letter-spacing:1px;line-height:1.375rem;height:1.25rem;width:6rem;text-align:center;margin-top:.25rem}.hub .card-body .hub-card-title-container .card-title{padding-left:0;font-size:1.5rem;color:#262626}.hub .card-body .hub-card-title-container .star-list{list-style:none;padding-left:0}.hub .card-body .hub-card-title-container .star-list li{display:inline}.hub .card-body .hub-card-title-container .star-list li.github-stars-count-whole-number{display:none}.hub .hub-filter-menu ul{list-style-type:none;padding-left:1.25rem}.hub .hub-filter-menu ul li{padding-right:1.25rem;word-break:break-all}.hub .hub-filter-menu ul li a{color:#797676}.hub .hub-filter-menu ul li a:hover{color:#ee4c2c}.hub .hub-filter{cursor:pointer}.hub-index #dropdownSortLeft{color:#797676;cursor:pointer;z-index:1;position:absolute;top:inherit;left:23%;max-width:4rem}@media (min-width: 480px) and (max-width: 590px){.hub-index #dropdownSortLeft{left:40%}}.hub #dropdownFilter,#dropdownSort,#dropdownSortLeft{color:#797676;cursor:pointer;z-index:1;position:absolute;top:11rem;right:1rem;left:inherit}@media (min-width: 480px) and (max-width: 590px){.hub #dropdownFilter,#dropdownSort,#dropdownSortLeft{top:7rem}}@media (min-width: 590px){.hub #dropdownFilter,#dropdownSort,#dropdownSortLeft{top:5rem}}@media screen and (min-width: 768px){.hub #dropdownFilter,#dropdownSort,#dropdownSortLeft{top:5rem}}.hub .sort-menu{left:inherit;right:1rem;top:12.5rem;max-width:12rem}@media (min-width: 480px) and (max-width: 590px){.hub .sort-menu{top:8.5rem}}@media (min-width: 590px) and (max-width: 900px){.hub .sort-menu{top:6.5rem}}@media (min-width: 900px) and (max-width: 1239px){.hub .sort-menu{top:6.5rem}}@media screen and (min-width: 1240px){.hub .sort-menu{right:0;top:6.5rem}}.hub-index .sort-menu{left:23%;top:inherit;max-width:12rem}.hub .research-hub-title,.research-hub-sub-title{text-transform:uppercase;letter-spacing:1.78px;line-height:2rem}.research-hub-sub-title{padding-bottom:1.25rem}.hub .research-hub-title{color:#ee4c2c}.hub .all-models-button,.full-docs-button{font-size:1.125rem;position:relative;cursor:pointer;outline:none;padding:.625rem 1.875rem .625rem 1.25rem;background-color:#fff;margin-bottom:0.125rem;border:2px solid 
#f3f4f7;letter-spacing:-0.25px;line-height:1.75rem;color:#6c6c6d;background-image:url("/assets/images/chevron-right-orange.svg");background-size:6px 13px;background-position:center right 10px;background-repeat:no-repeat}.hub .all-models-button a,.full-docs-button a{color:#6c6c6d}@media screen and (min-width: 768px){.hub .all-models-button:after,.full-docs-button:after{content:"";display:block;width:0;height:1px;position:absolute;bottom:0;left:0;background-color:#ee4c2c;transition:width .250s ease-in-out}.hub .all-models-button:hover:after,.full-docs-button:hover:after{width:100%}.hub .all-models-button:hover,.full-docs-button:hover{color:#262626}}.hub .hub-column{padding-bottom:4.6875rem}.hub.hub-index .hub-column{padding-bottom:0}.hub .how-it-works{padding-top:3.125rem;padding-bottom:2.8125rem}.hub .how-it-works .how-it-works-text{color:#6c6c6d;font-size:1.25rem;letter-spacing:0;line-height:1.875rem}.hub .how-it-works .how-it-works-title-col{padding-bottom:3.4375rem}.hub .how-it-works .full-docs-button{margin-top:1.875rem}.hub .hub-code-text{font-size:80%;color:#262626;background-color:#e2e2e2;padding:2px}.hub .hub-code-block{display:block;border-left:3px solid #ee4c2c;padding:1.25rem 1.5625rem 1.25rem 1.5625rem;margin-bottom:3.75rem}.hub pre.highlight{background-color:#e2e2e2;border-left:2px solid #ee4c2c}.hub code.highlighter-rouge{background-color:#e2e2e2}.hub article{padding-top:1.25rem}@media screen and (min-width: 768px){.hub article{padding-top:0}}.hub article p{color:#262626}@media screen and (min-width: 768px){.hub .hub-detail-background{height:515px}}.hub .dropdown-menu{border-radius:0;padding-bottom:0}.hub .card:hover .hub-image:before{bottom:100%}.hub.hub.hub-detail .github-stars-image img{height:9px}@media screen and (min-width: 768px){.hub.hub.hub-detail .github-stars-image img{height:10px}}.hub #development-models-hide,#research-models-hide{display:none}@media (min-width: 768px){.hub .col-md-6.hub-column{flex:0 0 100%;max-width:100%}}@media screen and (min-width: 1240px){.hub .col-md-6.hub-column{flex:0 0 50%;max-width:50%}}@media (min-width: 768px){.hub .col-md-12.hub-column .col-md-6{flex:0 0 100%;max-width:100%}}@media screen and (min-width: 1240px){.hub .col-md-12.hub-column .col-md-6{flex:0 0 100%;max-width:50%}}.hub .featured-image{padding-bottom:1.25rem}.hub .coming-soon{font-weight:300;font-style:italic}@media screen and (min-width: 768px){.hub.hub-index .jumbotron{height:325px}}.hub.hub-index .jumbotron h1{padding-top:0}@media screen and (min-width: 768px){.hub.hub-index .jumbotron h1{padding-top:3.4375rem}}.hub.hub-index .jumbotron p.lead{padding-top:3.4375rem}.hub.hub-index .main-content-wrapper{margin-top:210px}@media screen and (min-width: 768px){.hub.hub-index .main-content-wrapper{margin-top:280px}}.hub .page-link{font-size:1.25rem;letter-spacing:0;line-height:2.125rem;color:#ee4c2c;width:7.5rem;text-align:center}.hub .filter-btn{color:#797676;border:1px solid #797676;display:inline-block;text-align:center;white-space:nowrap;vertical-align:middle;padding:0.375rem 0.75rem;font-size:1rem;line-height:1.5;margin-bottom:5px}.hub .filter-btn:hover{border:1px solid #ee4c2c;color:#ee4c2c}.hub .selected{border:1px solid #ee4c2c;background-color:#ee4c2c;color:#fff}.hub .selected:hover{color:#fff}.hub .all-tag-selected{background-color:#797676;color:#fff}.hub .all-tag-selected:hover{border-color:#797676;color:#fff}.hub .pagination .page{border:1px solid #dee2e6;padding:0.5rem 0.75rem}.hub .pagination .active .page{background-color:#dee2e6}.hub 
.hub-tags-container{width:60%}.hub .hub-tags-container.active{width:0}@media screen and (min-width: 768px){.hub .hub-search-wrapper{top:8px}}.hub .hub-search-wrapper .algolia-autocomplete .ds-dropdown-menu{min-width:100%;max-width:100% !important}.hub .hub-search-wrapper .algolia-autocomplete{width:100%}.hub .hub-search-wrapper.active{width:100%}.hub .hub-search-wrapper span{font-size:1.125rem;text-align:center}@media (max-width: 480px){.hub #hub-search-icon{margin-top:1rem}}#hub-search-icon{background-image:url("/assets/images/search-icon.svg");color:transparent;opacity:0.4;width:25px;height:25px;margin-left:3rem;background-size:15px 20px;background-repeat:no-repeat;right:10px;position:absolute;z-index:1;cursor:pointer}#hub-search-icon:hover{background-image:url("/assets/images/search-icon-orange.svg");opacity:1}#hub-search-input{background-color:#CCCDD1;border:none;color:#000;font-size:1.125rem;font-weight:300;line-height:20px;outline:none;position:relative;display:none;width:100%;border-radius:5px;padding:.875rem 0 .875rem .3125rem}#hub-close-search{display:none;margin-left:20px;opacity:0.4;right:10px;position:absolute;z-index:1;cursor:pointer;font-size:1.125rem}@media screen and (min-width: 768px){#hub-close-search{top:1.125rem}}#hub-close-search:hover{color:#ee4c2c;opacity:1}.hub .hub-divider{margin-bottom:2.2rem;margin-top:1.5rem}.hub .active-hub-divider{border-color:#ee4c2c}.hub .hub-search-border{display:flex;align-items:center;flex-direction:row;border:none;background-color:transparent;border-radius:20px;width:100%}.hub .hub-cards-wrapper{z-index:1000}.hub .nav-container{display:flex;width:100%;position:absolute}.compact-cards{width:100%}.compact-cards a{color:#6C6C6D}.compact-cards a:hover{color:#ee4c2c}.compact-hub-card-wrapper{padding:0}.compact-card-container{display:flex;align-items:center}.compact-card-body{padding-top:8px}.compact-card-body:hover{border-bottom:1px solid #ee4c2c;color:#ee4c2c}.compact-card-body:hover .compact-item-title{color:#ee4c2c}.compact-card-body .compact-hub-card-title-container{width:75%;display:flex}.compact-model-card{height:auto;border-bottom:1px solid #E2E2E2}.compact-item-title{padding-left:0;color:#000}.compact-card-summary{white-space:nowrap;overflow:hidden;text-overflow:ellipsis;top:5px}.compact-hub-divider{padding:0;width:100%}.hub-select-container{position:absolute;right:0;height:2rem}.compact-hub-index-cards{padding-bottom:2rem}.full-hub-icon:hover{cursor:pointer;height:3rem}.compact-hub-icon{margin-left:0.5rem;margin-right:3.125rem}.compact-hub-icon:hover{cursor:pointer}.mobile article{margin-bottom:5rem}.mobile .main-background{height:275px}@media screen and (min-width: 768px){.mobile .main-background{height:380px}}.mobile .main-content-wrapper{margin-top:275px}@media screen and (min-width: 768px){.mobile .main-content-wrapper{margin-top:350px}}.mobile .jumbotron{height:190px}@media screen and (min-width: 768px){.mobile .jumbotron{height:260px}}.mobile .main-content .navbar{background-color:#f3f4f7;padding-left:0;padding-bottom:0;padding-top:0}@media (min-width: 992px){.mobile .main-content .navbar li:first-of-type{padding-left:3.4375rem}.mobile .main-content .navbar .nav-item{padding:2rem;cursor:pointer}.mobile .main-content .navbar .nav-link{position:relative;top:10%;transform:translateY(-50%)}}.mobile .main-content .navbar .nav-select{background-color:#fff}.mobile .main-content .navbar .nav-select .nav-link{color:#ee4c2c;font-weight:500}.mobile .main-content .navbar .nav-link{font-size:1.125rem;color:#8c8c8c}@media screen and 
(min-width: 768px){.mobile .main-content .navbar .nav-link{margin-left:1.875rem}}.mobile .main-content .navbar .nav-link:hover{color:#ee4c2c}.mobile .main-content .navbar .nav-item{padding-top:.9375rem;padding-bottom:.9375rem}@media screen and (min-width: 768px){.mobile .main-content .navbar .nav-item{padding-bottom:0;padding-top:2rem}}@media (min-width: 768px) and (max-width: 1239px){.mobile .main-content .navbar .nav-item{padding-bottom:0;padding-top:2rem}}@media (max-width: 990px){.mobile .main-content .navbar .nav-item{padding-bottom:.625rem;padding-top:1rem}}.mobile .main-content .navbar .navbar-toggler{margin-left:2.5rem}.mobile .main-content{padding-top:0}@media screen and (min-width: 768px){.mobile .main-content{padding-top:1.9rem}}.mobile .nav-menu-wrapper{background-color:#f3f4f7}.mobile .navbar-nav{flex-direction:row}.mobile .mobile-page-sidebar{padding-top:2.5rem;padding-bottom:2.5rem;top:15%}@media screen and (min-width: 768px){.mobile .mobile-page-sidebar{padding-top:0}}.mobile .mobile-page-sidebar ul{padding-left:0}.mobile .mobile-page-sidebar li{list-style-type:none;line-height:23px;margin-bottom:15px}.mobile .mobile-page-sidebar li a{color:#8c8c8c}.mobile .mobile-page-sidebar li a.active,.mobile .mobile-page-sidebar li a:hover{color:#ee4c2c}@media screen and (min-width: 1240px){.deep-learning .header-container{margin-bottom:1rem}}.deep-learning .jumbotron{height:180px}@media screen and (min-width: 768px){.deep-learning .jumbotron{height:250px}}.deep-learning .jumbotron .thank-you-page-container{margin-top:0}@media (min-width: 768px) and (max-width: 1239px){.deep-learning .jumbotron .thank-you-page-container{margin-top:250px}}@media screen and (min-width: 768px){.deep-learning .jumbotron .deep-learning-jumbotron-text{margin-top:55px}.deep-learning .jumbotron .deep-learning-jumbotron-text h1{padding-top:30px}}@media (min-width: 768px) and (max-width: 1239px){.deep-learning .jumbotron .deep-learning-jumbotron-text{max-width:95%;flex-basis:100%}}.deep-learning .jumbotron .deep-learning-thank-you-text{width:80%}.deep-learning .jumbotron .deep-learning-thank-you-text .download-book-link{display:inline-block}.deep-learning .jumbotron .deep-learning-landing-text{width:100%}@media screen and (min-width: 768px){.deep-learning .jumbotron .deep-learning-landing-text{width:85%}}.deep-learning .jumbotron .deep-learning-book-container{display:none}@media screen and (min-width: 768px){.deep-learning .jumbotron .deep-learning-book-container{display:block}}@media (min-width: 768px) and (max-width: 1239px){.deep-learning .jumbotron .deep-learning-book-container{display:none}}.deep-learning .jumbotron .thank-you-book-container{display:none}@media (min-width: 768px) and (max-width: 1239px){.deep-learning .jumbotron .thank-you-book-container{display:block}}@media screen and (min-width: 768px){.deep-learning .jumbotron .thank-you-book-container{display:block}}@media screen and (min-width: 768px){.deep-learning .deep-learning-col{max-width:80%}}@media screen and (min-width: 768px){.deep-learning .deep-learning-background{height:440px}}@media screen and (min-width: 768px){.deep-learning .header-holder{height:90px}}.deep-learning .main-content-wrapper{margin-top:250px}@media screen and (min-width: 768px){.deep-learning .main-content-wrapper{margin-top:480px}}@media screen and (min-width: 768px){.deep-learning .deep-learning-content{padding-top:0}}.deep-learning .main-background{height:250px}@media screen and (min-width: 768px){.deep-learning .main-background{height:440px}}.deep-learning 
.thank-you-wrapper{margin-top:400px}@media screen and (min-width: 768px){.deep-learning .thank-you-wrapper{margin-top:275px}}.deep-learning .thank-you-background{height:438px}@media screen and (min-width: 768px){.deep-learning .thank-you-background{height:680px}}.deep-learning-container{display:flex;align-items:center}.deep-learning-logo{background-image:url("/assets/images/pytorch-logo.png")}.deep-learning-row{display:flex;align-items:center}.deep-learning-row .lead{margin-top:1rem;margin-bottom:2rem}@media (min-width: 768px) and (max-width: 1239px){.deep-learning-row h1{font-size:3rem}}@media screen and (min-width: 768px){.deep-learning-row h1{margin-top:2rem}}.deep-learning-book{max-width:100%;height:400px}.deep-learning-form{margin-left:-1rem}@media screen and (min-width: 768px){.deep-learning-form{margin-left:0;margin-top:1rem}}#deep-learning-button{margin-top:2rem}.deep-learning-form .email-subscribe-form .deep-learning-input{padding-left:.5rem;background-color:#f3f4f7}.deep-learning-form #mce-error-response{color:#ee4c2c}.video-item{margin-bottom:5rem}.video-item a h5{color:#000;margin-top:1rem}.video-item a:hover h5{color:#ee4c2c}.video-item .image-container{overflow:hidden}.video-item .image-container img{margin:-10% 0;width:100%}.ecosystem .contributor-jumbotron{width:90%}@media screen and (min-width: 768px){.ecosystem .contributor-jumbotron{height:262px}}.ecosystem .contributor-jumbotron .container{max-width:920px}.ecosystem .contributor-jumbotron h1{padding-top:0}.ecosystem .contributor-jumbotron h1 span{font-weight:300;color:#812CE5}.ecosystem .contributor-jumbotron .contributor-jumbo-text h1{color:white}.ecosystem .contributor-jumbotron .contributor-jumbo-text h2{color:white;padding-top:0}.hidden{display:none}.contributor-container-fluid{height:4rem;width:100%}@media screen and (max-width: 767px){.contributor-container-fluid{margin-top:2rem}}@media screen and (min-width: 1200px){.contributor-container-fluid{margin-left:0}}.ecosystem .contributor.main-content{padding-top:0}.ecosystem .contributor.main-content .navbar{padding-left:0;padding-bottom:0;padding-top:0}.ecosystem .contributor.main-content .navbar .nav-item{cursor:pointer}.ecosystem .contributor.main-content .navbar .nav-item:last-of-type{position:relative}@media (min-width: 992px){.ecosystem .contributor.main-content .navbar .nav-item{padding:2rem;cursor:pointer}.ecosystem .contributor.main-content .navbar .nav-link{position:relative;top:10%;transform:translateY(-50%)}}.ecosystem .contributor.main-content .navbar .nav-select{background-color:#fff}.ecosystem .contributor.main-content .navbar .nav-select .nav-link{color:#ee4c2c;font-weight:500}.ecosystem .contributor.main-content .navbar .nav-link{font-size:1.125rem;color:#8c8c8c}@media screen and (min-width: 768px){.ecosystem .contributor.main-content .navbar .nav-link{margin-left:1.875rem}}.ecosystem .contributor.main-content .navbar .nav-link:hover{color:#ee4c2c}.ecosystem .contributor.main-content .navbar .contributor-nav-link{padding-left:1.25rem;padding-right:1.25rem}@media screen and (min-width: 768px){.ecosystem .contributor.main-content .navbar .contributor-nav-link{padding-left:1.875rem;padding-right:1.875rem}}.ecosystem .contributor.main-content .navbar .contributor-nav{flex-direction:row}.ecosystem .contributor.main-content .navbar .nav-item{padding-top:.9375rem;padding-bottom:.9375rem}@media screen and (min-width: 768px){.ecosystem .contributor.main-content .navbar .nav-item{padding-bottom:0;padding-top:2rem}}@media (min-width: 768px) and (max-width: 
1239px){.ecosystem .contributor.main-content .navbar .nav-item{padding-bottom:0;padding-top:2rem}}@media (max-width: 990px){.ecosystem .contributor.main-content .navbar .nav-item{padding-bottom:.625rem;padding-top:1rem}}.ecosystem .contributor.main-content .navbar .navbar-toggler{margin-left:2.5rem}.past-issue-container{display:flex}@media (max-width: 767px){.past-issue-container{display:block}}.past-issue-container .get-started-cloud-sidebar .sticky-top{position:-webkit-sticky;position:sticky;top:15%}@media (max-width: 767px){.past-issue-container .get-started-cloud-sidebar .sticky-top{position:relative;top:0;margin-left:0}}.past-issue-container .get-started-cloud-sidebar .pytorch-article li{list-style:initial}.past-issue-container .get-started-cloud-sidebar li{list-style-type:none;line-height:36px;color:#8c8c8c}.past-issue-container .get-started-cloud-sidebar span{white-space:nowrap}#past-issues{max-width:920px;margin:auto;margin-top:0;margin-bottom:0}.contributor-container{max-width:920px;left:0;right:0;margin-left:auto;margin-right:auto;padding-left:30px;padding-right:30px;width:90%}.past-issue-container.container{padding-left:5px;padding-top:45px}.nav-background{width:100%;background-color:#f3f4f7}#get-started-contributor-sidebar-list{padding-left:0}#get-started-contributor-sidebar-list .active{color:#ee4c2c}#get-started-contributor-sidebar-list li a{color:#8c8c8c}.two-column-row{max-width:920px;margin:0 auto 0 auto;padding:0 30px 43px 30px;width:90%}@media screen and (min-width: 768px){.two-column-row{display:flex}}.two-column-row h2{text-transform:uppercase;font-weight:100;margin-bottom:30px}.two-column-row p{margin-bottom:40px}.two-column-row .content-left{flex:60%;padding-top:76px}@media screen and (min-width: 768px){.two-column-row .content-left{margin-right:62px}}.two-column-row .content-left h2{color:#ee4c2c}.two-column-row .content-left .contributor-consent-check{max-width:400px}.two-column-row .content-left .email-consent{color:#797676;font-size:14px}.two-column-row .content-left .please-accept-terms{display:none;color:#ee4c2c;font-size:14px}.two-column-row .content-right{flex:40%;padding-top:76px}.two-column-row .content-right h2{color:#812CE5}.two-column-row .contributor-form{margin:-8px 0 47px 0}.two-column-row .contributor-form .form-success,.two-column-row .contributor-form .form-fail{color:#ee4c2c;display:none;flex:none;margin:8px 0 12px 0}.two-column-row .contributor-form form{width:100%}.two-column-row .contributor-form form .contributor-form-ui{display:flex;max-width:390px;flex-wrap:wrap}.two-column-row .contributor-form form .contributor-form-ui input[type="text"]{border:1px solid #e6e6e6;border-radius:4px;flex:1 70%;padding:5px 8px 5px 8px;margin-right:10px}.two-column-row .contributor-form form .contributor-form-ui input[type="text"]::-moz-placeholder{color:silver}.two-column-row .contributor-form form .contributor-form-ui input[type="text"]:-ms-input-placeholder{color:silver}.two-column-row .contributor-form form .contributor-form-ui input[type="text"]::-ms-input-placeholder{color:silver}.two-column-row .contributor-form form .contributor-form-ui input[type="text"]::placeholder{color:silver}.two-column-row .contributor-form form .contributor-form-ui input[type="text"]:focus{border:1px solid #ee4c2c}.two-column-row .contributor-form form .contributor-form-ui input[type="submit"]{background:#e6e6e6;border:none;border-radius:4px;color:#6d6d6d}.two-column-row .contributor-form form .contributor-form-ui 
input[type="submit"]:hover{background:silver;color:#3a3a3a}.two-column-row .contributor-form input[type="checkbox"]{margin:1px 6px 0 0}.two-column-row .contributor-form .contributor-consent-check{color:#797676;margin-top:1rem}.two-column-row .contributors-button{background-image:url("/assets/images/chevron-right-orange.svg");background-color:#fff;background-size:6px 13px;background-position:center right 10px;background-repeat:no-repeat;border:2px solid #f3f4f7;color:#6c6c6d;cursor:pointer;font-size:1.125rem;outline:none;letter-spacing:-0.25px;line-height:1.75rem;margin-bottom:0.125rem;padding:.625rem 1.875rem .625rem 1.25rem}.two-column-row .contributors-button a{color:#6c6c6d}@media screen and (min-width: 768px){.two-column-row .contributors-button:after{content:"";display:block;width:0;height:1px;position:absolute;bottom:0;left:0;background-color:#ee4c2c;transition:width .250s ease-in-out}.two-column-row .contributors-button:hover:after{width:100%}.two-column-row .contributors-button:hover{color:#262626}}.mobile .enterprise-jumbotron{height:210px}@media screen and (min-width: 768px){.mobile .enterprise-jumbotron{height:280px}}.enterprise{padding-bottom:0}.enterprise p,.enterprise li{color:#6c6c6d;font-size:18px}.enterprise h2{padding-bottom:1.5rem}.enterprise .container{padding:48px 30px 48px 30px}.enterprise .enterprise-gray-container{background-color:#f3f4f7}.enterprise .pyt-enterprise-logo{background-image:url("/assets/images/PTE_lockup_PRIMARY.svg");background-repeat:no-repeat;height:60px}.enterprise .container{max-width:940px}.enterprise .enterprise-landing-azure-logo-container{float:left;padding:0}.ecosystem .events-wrapper{background-color:white}@media screen and (min-width: 768px){.ecosystem .events-wrapper{margin-top:472px}}.ecosystem .events{padding-top:0}.ecosystem .events .event-info-container{display:flex;flex-flow:column}.ecosystem .events .sticky-top{top:15%}.ecosystem .events .event-label{margin-bottom:2rem}.ecosystem .live-event-container{display:flex}@media (max-width: 767px){.ecosystem .live-event-container{flex-flow:wrap}}.ecosystem .events-section{max-width:920px;margin:0 auto 0 auto;padding:0 30px 43px 30px;width:90%}.ecosystem .events-section .event-item{padding-bottom:3rem;border-bottom:1px solid #D6D7D8}.ecosystem .events-section .event-item h2{padding-bottom:1rem}.ecosystem .community-event{margin:0;padding:3px 10px;border:1px solid #8c8c8c;border-radius:3px;text-transform:uppercase;font-size:14px;font-weight:700;color:#8c8c8c}.ecosystem .event-side-nav-container{padding-left:3rem}.ecosystem .event-side-nav-container ul{list-style:none}.ecosystem .live-events-section p{font-size:18px;margin-top:2rem}@media (min-width: 768px) and (max-width: 1239px){.ecosystem .live-events-section{width:100%;padding-left:5px;padding-right:5px}}@media (max-width: 767px){.ecosystem .live-events-section{width:100%;padding-left:5px;padding-right:5px}}.ecosystem .events.main-content{padding-top:0}.events-container-fluid{height:5rem;width:100%;padding-bottom:7rem}@media screen and (max-width: 767px){.events-container-fluid{margin-top:2rem}}@media screen and (min-width: 1200px){.events-container-fluid{margin-left:0}}.events-container{max-width:920px;left:0;right:0;margin-left:auto;margin-right:auto;padding-left:0px;padding-right:0px;width:90%}.ecosystem .events.main-content .navbar{padding-left:0;padding-bottom:0;padding-top:0}.ecosystem .events.main-content .navbar .nav-item{cursor:pointer}.ecosystem .events.main-content .navbar .nav-item:last-of-type{position:relative}@media 
(min-width: 992px){.ecosystem .events.main-content .navbar .nav-item{padding:.5rem;cursor:pointer}.ecosystem .events.main-content .navbar .nav-link{position:relative;top:10%;transform:translateY(-50%)}}.ecosystem .events.main-content .navbar .nav-select{background-color:#fff}.ecosystem .events.main-content .navbar .nav-select .nav-link{color:#ee4c2c;font-weight:500}.ecosystem .events.main-content .navbar .nav-link{font-size:1.125rem;color:#8c8c8c}@media screen and (min-width: 768px){.ecosystem .events.main-content .navbar .nav-link{margin-left:1.875rem}}.ecosystem .events.main-content .navbar .nav-link:hover{color:#ee4c2c}.ecosystem .events.main-content .navbar .events-nav-link{padding-left:.9375rem;padding-right:.3125rem}@media screen and (min-width: 768px){.ecosystem .events.main-content .navbar .events-nav-link{padding-left:1.25rem;padding-right:1.25rem}}.ecosystem .events.main-content .navbar .events-nav{flex-direction:row}.ecosystem .events.main-content .navbar .nav-item{padding-top:.9375rem;padding-bottom:.9375rem}@media screen and (min-width: 768px){.ecosystem .events.main-content .navbar .nav-item{padding-bottom:0;padding-top:2rem}}@media (min-width: 768px) and (max-width: 1239px){.ecosystem .events.main-content .navbar .nav-item{padding-bottom:0;padding-top:2rem}}@media (max-width: 990px){.ecosystem .events.main-content .navbar .nav-item{padding-bottom:.625rem;padding-top:1rem}}.ecosystem .events.main-content .navbar .navbar-toggler{margin-left:2.5rem}.events-video-wrapper{width:100%;border:1px solid #797676;background-color:#f3f4f7;height:21rem;margin-top:2.5rem}.events-video-wrapper .video-container{display:flex;top:12%}.events-video-wrapper .video-tabs{display:flex}.events-video-wrapper .events-video-nav{flex-direction:row;padding-right:0;margin-bottom:1rem}.events-video-wrapper .events-video-nav .nav-item{border-right:1px solid #797676;border-bottom:1px solid #797676}.events-video-wrapper .events-video-nav .nav-select{background-color:#fff;border-bottom:none}.events-video-wrapper .events-video-nav .nav-select .nav-link{color:#ee4c2c}.events-video-wrapper .events-nav-link{text-align:center}.events-video-wrapper .video{position:relative;height:0;padding-bottom:30%;place-self:center}.events-video-wrapper .video-info{margin-left:3rem;max-width:45%}.events-video-wrapper iframe{height:100%;width:100%;position:absolute}.video-links-container{border:1px solid #797676}.video-links-container .video-links{display:flex}.video-links-container .video-links .video-link-item{padding-left:1rem;list-style:none}.episode-header-text{font-size:26px;margin-bottom:2rem}.episode-card-row{display:block}@media screen and (min-width: 908px){.episode-card-row{display:flex;flex-wrap:wrap;margin-bottom:2rem}}.episode-card-row .episode-card.resource-card{height:14rem;margin-right:1rem;margin-bottom:1rem;background-color:#f3f4f7;border:none;max-width:31%;flex:auto}.episode-card-row .episode-card.resource-card ul{list-style:none}.episode-card-row .episode-card.resource-card a{color:inherit}.episode-card-row .episode-card.resource-card .episode-body{display:block;position:relative;top:30px;margin-left:20px}.episode-card-row .episode-card.resource-card .episode-title{margin-left:3.2rem;margin-bottom:.5rem;font-size:1.5rem}@media screen and (min-width: 768px){.episode-card-row .episode-card.resource-card .episode-title{margin-left:2.5rem}}.episode-card-row .episode-card.resource-card .guest-name{font-weight:500;font-size:1.25rem;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.episode-card-row 
.episode-card.resource-card .episode-info{display:flex;justify-content:space-between}.episode-card-row .episode-card.resource-card .episode-info span{padding-left:5px;padding-right:5px}.episode-card-row .episode-card.resource-card .info-divide{display:block;border-bottom:1px solid #D6D7D8;margin-top:.5rem;margin-bottom:.5rem}.episode-card-row .episode-card.resource-card .episode-poster{color:#ee4c2c}.episode-card-row .episode-card.resource-card .episode-date-time{display:flex;padding-left:0}.episode-card-row .episode-card.resource-card .episode-date-time span{padding-left:5px;padding-right:5px}@media screen and (max-width: 907px){.episode-card-row .episode-card.resource-card{max-width:100%;margin-bottom:1.25rem}}.episode-card-row .episode-card.resource-card.pytorch-resource:before{content:"";background-size:32px 32px;background-repeat:no-repeat;display:block;position:absolute;height:32px;width:32px;top:30px;left:15px}@media screen and (min-width: 768px){.episode-card-row .episode-card.resource-card.pytorch-resource:before{left:30px;top:30px}}.podcast-container{padding-left:0}@media screen and (min-width: 768px){.podcast-container{display:flex}.podcast-container .podcast-card:not(:first-of-type){margin-left:1rem}}.podcast-container .podcast-card{display:flex;align-items:center;justify-content:center;margin-top:2rem;border:1px solid #D6D7D8;height:8.75rem}@media screen and (min-width: 768px){.podcast-container .podcast-card:after{content:"";display:block;width:0;height:1px;position:absolute;bottom:0;left:0;background-color:#ee4c2c;transition:width .250s ease-in-out}.podcast-container .podcast-card:hover:after{width:100%}.podcast-container .podcast-card:hover{color:#262626}}.podcast-container .podcast-title{font-size:24px;font-weight:400}.comm-stories .community-stories-wrapper{background-color:white}.comm-stories .community-stories{padding-top:0}.comm-stories .community-stories .production-info-container,.comm-stories .community-stories .research-info-container{display:flex;flex-flow:column}.comm-stories .community-stories .sticky-top{top:15%}.comm-stories .production-container,.comm-stories .research-container{display:flex;padding-left:0}@media (max-width: 767px){.comm-stories .production-container,.comm-stories .research-container{flex-flow:wrap}}.comm-stories .production-section,.comm-stories .research-section{max-width:920px;margin:0 auto 0 auto;padding:0 30px 43px 30px;width:90%}.comm-stories .production-section .production-item,.comm-stories .production-section .research-item,.comm-stories .research-section .production-item,.comm-stories .research-section .research-item{padding-bottom:2rem;padding-top:2rem;border-bottom:1px solid #d6d7d8}.comm-stories .production-section .production-item h2,.comm-stories .production-section .research-item h2,.comm-stories .research-section .production-item h2,.comm-stories .research-section .research-item h2{padding-bottom:1rem}.comm-stories .production-side-nav-container #research-sidebar-list,.comm-stories .production-side-nav-container #production-sidebar-list,.comm-stories .research-side-nav-container #research-sidebar-list,.comm-stories .research-side-nav-container #production-sidebar-list{padding-left:0}.comm-stories .production-side-nav-container #research-sidebar-list .active,.comm-stories .production-side-nav-container #production-sidebar-list .active,.comm-stories .research-side-nav-container #research-sidebar-list .active,.comm-stories .research-side-nav-container #production-sidebar-list .active{color:#ee4c2c}.comm-stories 
.production-side-nav-container #research-sidebar-list ul,.comm-stories .production-side-nav-container #production-sidebar-list ul,.comm-stories .research-side-nav-container #research-sidebar-list ul,.comm-stories .research-side-nav-container #production-sidebar-list ul{padding-left:3rem;list-style:none}.comm-stories .production-side-nav-container #research-sidebar-list ul li,.comm-stories .production-side-nav-container #production-sidebar-list ul li,.comm-stories .research-side-nav-container #research-sidebar-list ul li,.comm-stories .research-side-nav-container #production-sidebar-list ul li{line-height:36px}.comm-stories .production-side-nav-container #research-sidebar-list ul li a,.comm-stories .production-side-nav-container #production-sidebar-list ul li a,.comm-stories .research-side-nav-container #research-sidebar-list ul li a,.comm-stories .research-side-nav-container #production-sidebar-list ul li a{color:#8c8c8c}.comm-stories .production-section p,.comm-stories .research-section p{font-size:18px;margin-top:2rem}@media (min-width: 768px) and (max-width: 1239px){.comm-stories .production-section,.comm-stories .research-section{width:100%;padding-left:5px;padding-right:5px}}@media (max-width: 767px){.comm-stories .production-section,.comm-stories .research-section{width:100%;padding-left:5px;padding-right:5px}}.comm-stories .main-content-wrapper{margin-top:275px}@media screen and (min-width: 768px){.comm-stories .main-content-wrapper{margin-top:380px}}.comm-stories .jumbotron{color:#fff;height:190px}@media screen and (min-width: 768px){.comm-stories .jumbotron{height:260px}}.ecosystem .community-stories.main-content{padding-top:0}.community-stories-container-fluid{height:5rem;width:100%;padding-bottom:7rem}@media screen and (max-width: 767px){.community-stories-container-fluid{margin-top:2rem}}@media screen and (min-width: 1200px){.community-stories-container-fluid{margin-left:0}}.comm-stories .community-stories.main-content .navbar{padding-left:0;padding-bottom:0;padding-top:0}.comm-stories .community-stories.main-content .navbar .nav-item{cursor:pointer}.comm-stories .community-stories.main-content .navbar .nav-item:last-of-type{position:relative}@media (min-width: 992px){.comm-stories .community-stories.main-content .navbar .nav-item{padding:2rem;cursor:pointer}.comm-stories .community-stories.main-content .navbar .nav-link{position:relative;top:10%;transform:translateY(-50%)}}.comm-stories .community-stories.main-content .navbar .nav-select{background-color:#fff}.comm-stories .community-stories.main-content .navbar .nav-select .nav-link{color:#ee4c2c;font-weight:500}.comm-stories .community-stories.main-content .navbar .nav-link{font-size:1.125rem;color:#8c8c8c}@media screen and (min-width: 768px){.comm-stories .community-stories.main-content .navbar .nav-link{margin-left:1.875rem}}.comm-stories .community-stories.main-content .navbar .nav-link:hover{color:#ee4c2c}.comm-stories .community-stories.main-content .navbar .community-stories-nav-link{padding-left:1.25rem;padding-right:1.25rem}@media screen and (min-width: 768px){.comm-stories .community-stories.main-content .navbar .community-stories-nav-link{padding-left:1.875rem;padding-right:1.875rem}}.comm-stories .community-stories.main-content .navbar .community-stories-nav{flex-direction:row}.comm-stories .community-stories.main-content .navbar .nav-item{padding-top:.9375rem;padding-bottom:.9375rem}@media screen and (min-width: 768px){.comm-stories .community-stories.main-content .navbar 
.nav-item{padding-bottom:0;padding-top:2rem}}@media (min-width: 768px) and (max-width: 1239px){.comm-stories .community-stories.main-content .navbar .nav-item{padding-bottom:0;padding-top:2rem}}@media (max-width: 990px){.comm-stories .community-stories.main-content .navbar .nav-item{padding-bottom:.625rem;padding-top:1rem}}.comm-stories .community-stories.main-content .navbar .navbar-toggler{margin-left:2.5rem}.announcement .hero-content{top:148px;height:250px;position:relative;margin-bottom:120px;justify-content:center}@media screen and (min-width: 768px){.announcement .hero-content{top:178px;height:350px}}.announcement .hero-content h1{font-size:3.75rem;text-transform:uppercase;font-weight:lighter;letter-spacing:1.08px;margin-bottom:.625rem;line-height:1.05;color:#fff}@media screen and (min-width: 768px){.announcement .hero-content h1{font-size:4.5rem}}.announcement .hero-content h1.small{font-size:40px}@media screen and (min-width: 768px){.announcement .hero-content h1.small{font-size:58px}}.announcement .hero-content .lead{margin-bottom:1.5625rem;padding-top:1.875rem;color:#fff;width:100%}.announcement .row{justify-content:center}.announcement .main-content{margin-bottom:5rem;padding-bottom:0}.announcement .main-background{height:370px}@media screen and (min-width: 768px){.announcement .main-background{height:450px}}.announcement .card-container{display:grid;grid-template-columns:repeat(2, 1fr);gap:20px;padding-top:3rem}.announcement .card-container .card{border:none;display:block}.announcement .card-container .card a{color:#000}.announcement .card-container .card .card-body{display:flex;flex-direction:column;height:100%;justify-content:space-between;padding:0}.announcement .card-container .card .card-body img{width:100%;height:207px;-o-object-fit:contain;object-fit:contain;padding:20px}@media screen and (min-width: 1000px){.announcement .card-container .card .card-body img{padding:30px}}@media screen and (min-width: 1000px){.announcement .card-container{grid-template-columns:repeat(3, 1fr);gap:36px}}.announcement .contact-us-section{background-color:#f3f4f7;padding:50px 0}.announcement .contact-us-section .row{justify-content:center}.announcement .contact-us-section .row .lead{padding-top:1.5rem}.announcement .contact-us-section .row .hbspt-form{padding:30px 0}.announcement .contact-us-section .row .hbspt-form .hs-button{background-image:url("/assets/images/chevron-right-orange.svg");background-size:6px 13px;background-position:top 16px right 11px;background-repeat:no-repeat;border-radius:0;border:none;background-color:#fff;color:#6c6c6d;font-weight:400;position:relative;letter-spacing:0.25px;padding:.75rem 2rem .75rem .75rem;margin:10px 0}@media screen and (min-width: 768px){.announcement .contact-us-section .row .hbspt-form .hs-button:after{content:"";display:block;width:0;height:1px;position:absolute;bottom:0;left:0;background-color:#ee4c2c;transition:width .250s ease-in-out}.announcement .contact-us-section .row .hbspt-form .hs-button:hover:after{width:100%}.announcement .contact-us-section .row .hbspt-form .hs-button:hover{color:#262626}}@media screen and (min-width: 768px){.announcement .contact-us-section .row .hbspt-form .hs-button{background-position:top 19px right 11px}}.announcement .contact-us-section .row .hbspt-form fieldset.form-columns-2,.announcement .contact-us-section .row .hbspt-form fieldset.form-columns-1{max-width:100%}.announcement .contact-us-section .row .hbspt-form fieldset.form-columns-2 .hs-form-field,.announcement .contact-us-section .row .hbspt-form 
fieldset.form-columns-1 .hs-form-field{max-width:100%;padding:10px 0;width:100%}.announcement .contact-us-section .row .hbspt-form fieldset.form-columns-2 .hs-form-field input,.announcement .contact-us-section .row .hbspt-form fieldset.form-columns-1 .hs-form-field input{border:none;width:100%}.announcement .contact-us-section .row .hbspt-form fieldset.form-columns-2 .hs-form-field textarea,.announcement .contact-us-section .row .hbspt-form fieldset.form-columns-1 .hs-form-field textarea{border:none;width:100%}.announcement .contact-us-section .row .hbspt-form li.hs-form-radio input[type=radio]{width:auto !important}.announcement .contact-us-section .row .hbspt-form li.hs-form-radio span{margin-left:5px}.announcement .contact-us-section .row .hbspt-form ul{list-style-type:none}.announcement .light-background-section{background-color:#fff}.announcement .light-background-section .content{padding:40px 0}.announcement .light-background-section ul li{font-size:1.25rem;font-weight:300}.announcement .darker-background-section{background-color:#f3f4f7}.announcement .darker-background-section .content{padding:40px 0}.announcement .grey-background-section{background-color:#f3f4f7;padding:60px 0}.announcement .grey-background-section img{height:100px}.announcement .grey-background-section p{font-size:14px;line-height:170%}.announcement .color-background-section{background-image:url("/assets/images/pytorch_bg_purple.jpg");background-size:100% 100%;background-repeat:no-repeat;padding:60px 0}.announcement .color-background-section h2{color:white}.announcement .body-side-text .lead{margin-bottom:1.5625rem;padding-top:1.5rem}.announcement img{width:100%}.announcement h2.upper{font-size:25px;line-height:130%;text-align:center;letter-spacing:1.75px;text-transform:uppercase;margin-bottom:30px}.announcement h3.upper{font-size:19px;text-transform:uppercase;letter-spacing:1.75px;line-height:130%;margin:25px 0}.announcement table.benefits{background-color:white;font-size:14px;text-align:center}.announcement table.benefits td.benefit{border-left:none;min-width:300px;text-align:left}@media screen and (min-width: 768px){.announcement table.benefits td.benefit{min-width:520px}}.announcement table.benefits tbody td{border-left:1px solid #812CE5;vertical-align:middle}.announcement table.benefits tbody td.benefit{font-weight:600}.announcement table.benefits thead,.announcement table.benefits tfoot{background-color:#812CE5;color:white;font-size:16px;font-weight:700}@media screen and (min-width: 768px){.announcement table.benefits thead,.announcement table.benefits tfoot{font-size:20px}}.announcement table.benefits thead td,.announcement table.benefits tfoot td{border-left:1px solid #000;vertical-align:middle;border-top:none}.announcement table.benefits thead a,.announcement table.benefits tfoot a{text-decoration:underline;color:white}.announcement table.benefits thead td.price,.announcement table.benefits tfoot td.price{font-size:14px;line-height:1.2}@media screen and (min-width: 768px){.announcement table.benefits thead td.price,.announcement table.benefits tfoot td.price{font-size:16px}}.announcement table.benefits img{width:15px}.announcement .modal-header{border-bottom:none;padding-bottom:0}.announcement .consolidated-employees tbody td{font-weight:600}.announcement .consolidated-employees td.no-border{border-left:none}.announcement .member-boxes{gap:20px;margin:0}.announcement .member-boxes div.col-sm{background-color:white}.board-member{margin:35px 0}.board-member img{margin-bottom:15px}.board-member a 
svg{margin-top:5px;height:25px;max-width:30px;fill:#000;color:#000}.board-member a:hover svg{fill:#ee4c2c;color:#ee4c2c}.announcement .cloud-credits-table{font-size:1.1rem;margin-top:40px}.announcement .cloud-credits-table ul{padding-left:20px}.announcement .cloud-credits-table ul li{margin-top:10px;font-size:1.1rem}.announcement .cloud-credits-table .col-md{border-radius:5px;margin-bottom:40px}.announcement .cloud-credits-table .card{border-radius:6px}.announcement .cloud-credits-table .thead{border-top-left-radius:5px;border-top-right-radius:5px;color:#fff;padding:14px 20px;text-align:center}.announcement .cloud-credits-table .col-md:first-child .thead{background:conic-gradient(from 53deg at 37% 100%, #828282 0, rgba(130,130,130,0.95) 100%)}.announcement .cloud-credits-table .col-md:nth-child(2) .thead{background:conic-gradient(from 53deg at 37% 100%, #ab9344 0, rgba(171,147,68,0.95) 100%)}.announcement .cloud-credits-table .col-md:nth-child(3) .thead{background:conic-gradient(from 53deg at 37% 100%, #293850 0, rgba(41,56,80,0.95) 100%)}.announcement .cloud-credits-table .tbody{border-bottom:1px solid #d0d0d0;border-left:1px solid #d0d0d0;border-right:1px solid #d0d0d0;height:100%;padding:26px 20px}.announcement .cloud-credits-table .tfoot{background-color:#000;border-bottom-left-radius:5px;border-bottom-right-radius:5px;color:#fff;padding:20px;text-align:center}.announcement .steps-columns{background-color:transparent}.announcement .steps-columns .col-md{margin-bottom:20px;padding:20px}.announcement .steps-columns h3{margin-bottom:20px}.announcement .steps-columns .step{font-size:1.5rem;margin-bottom:5px;margin-top:20px}.announcement .steps-columns ul{padding-left:20px}.announcement .steps-columns ul li{margin-top:10px} diff --git a/assets/main.scss b/assets/main.scss deleted file mode 100644 index 46de7d9d32c7..000000000000 --- a/assets/main.scss +++ /dev/null @@ -1,40 +0,0 @@ ---- ---- - -@import "bootstrap/scss/functions"; -@import "bootstrap/scss/bootstrap"; -@import "syntax-highlighting"; - -$baseurl:"{{ site.baseurl }}"; - -@import "variables"; -@import "bootstrap-overrides"; -@import "fonts"; -@import "base_styles"; -@import "code"; -@import "navigation"; -@import "jumbotron"; -@import "homepage"; -@import "footer"; -@import "main-content"; -@import "article"; -@import "get-started"; -@import "ecosystem"; -@import "features"; -@import "resources"; -@import "quick-start-module"; -@import "blog"; -@import "similar-posts-module"; -@import "search"; -@import "cookie-banner"; -@import "hub"; -@import "hub-search"; -@import "compact"; -@import "mobile"; -@import "deep-learning"; -@import "videos"; -@import "contributors"; -@import "enterprise"; -@import "events"; -@import "community-stories"; -@import "announcement"; diff --git a/board_info/advanced-micro-devices.html b/board_info/advanced-micro-devices.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/board_info/advanced-micro-devices.html @@ -0,0 +1 @@ + diff --git a/board_info/arm.html b/board_info/arm.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/board_info/arm.html @@ -0,0 +1 @@ + diff --git a/board_info/aws.html b/board_info/aws.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/board_info/aws.html @@ -0,0 +1 @@ + diff --git a/board_info/google-cloud.html b/board_info/google-cloud.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/board_info/google-cloud.html @@ -0,0 +1 @@ + diff --git a/board_info/huawei.html 
b/board_info/huawei.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/board_info/huawei.html @@ -0,0 +1 @@ + diff --git a/board_info/hugging-face.html b/board_info/hugging-face.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/board_info/hugging-face.html @@ -0,0 +1 @@ + diff --git a/board_info/ibm.html b/board_info/ibm.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/board_info/ibm.html @@ -0,0 +1 @@ + diff --git a/board_info/intel.html b/board_info/intel.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/board_info/intel.html @@ -0,0 +1 @@ + diff --git a/board_info/lightning.html b/board_info/lightning.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/board_info/lightning.html @@ -0,0 +1 @@ + diff --git a/board_info/meta.html b/board_info/meta.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/board_info/meta.html @@ -0,0 +1 @@ + diff --git a/board_info/microsoft-corporation.html b/board_info/microsoft-corporation.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/board_info/microsoft-corporation.html @@ -0,0 +1 @@ + diff --git a/board_info/nvidia-corporation.html b/board_info/nvidia-corporation.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/board_info/nvidia-corporation.html @@ -0,0 +1 @@ + diff --git a/case_studies/amazon-ads.html b/case_studies/amazon-ads.html new file mode 100644 index 000000000000..3b094ff04346 --- /dev/null +++ b/case_studies/amazon-ads.html @@ -0,0 +1,314 @@ + + + + + + + + + + + + + Amazon Ads | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ [Standard site chrome: conference banner, navigation, and Docs / Tutorials / Resources footer]
+ August 22, 2025
+ Amazon Ads
+ by Team PyTorch
+ Reduce inference costs by 71% and drive scale out using PyTorch, TorchServe, and AWS Inferentia.
        + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/case_studies/salesforce.html b/case_studies/salesforce.html new file mode 100644 index 000000000000..1b2dbcf24b25 --- /dev/null +++ b/case_studies/salesforce.html @@ -0,0 +1,314 @@ + + + + + + + + + + + + + Salesforce | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ [Standard site chrome: conference banner, navigation, and Docs / Tutorials / Resources footer]
+ August 22, 2025
+ Salesforce
+ by Team PyTorch
+ Pushing the state of the art in NLP and Multi-task learning.
        + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/case_studies/stanford-university.html b/case_studies/stanford-university.html new file mode 100644 index 000000000000..bba019118d2c --- /dev/null +++ b/case_studies/stanford-university.html @@ -0,0 +1,314 @@ + + + + + + + + + + + + + Stanford University | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ [Standard site chrome: conference banner, navigation, and Docs / Tutorials / Resources footer]
+ August 22, 2025
+ Stanford University
+ by Team PyTorch
+ Using PyTorch’s flexibility to efficiently research new algorithmic approaches.
        + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem-github-stars.json b/ecosystem-github-stars.json index 719211d02384..ee842a222dd9 100644 --- a/ecosystem-github-stars.json +++ b/ecosystem-github-stars.json @@ -1,13 +1,525 @@ ---- ---- - { "data": [ - {% for item in site.ecosystem %} + { - "id": "{{ item.github-id }}" + "id": "pytorch/captum" } - {% if forloop.last != true %},{% endif %} - {% endfor %} + , + + { + "id": "flairNLP/flair" + } + , + + { + "id": "asyml/forte" + } + , + + { + "id": "pytorch/ignite" + } + , + + { + "id": "open-mmlab" + } + , + + { + "id": "huggingface/accelerate" + } + , + + { + "id": "petuum/adaptdl" + } + , + + { + "id": "BorealisAI/advertorch" + } + , + + { + "id": "albumentations-team/albumentations" + } + , + + { + "id": "allenai/allennlp" + } + , + + { + "id": "ContinualAI/avalanche" + } + , + + { + "id": "ElementAI/baal" + } + , + + { + "id": "pytorch/botorch" + } + , + + { + "id": "catalyst-team/catalyst" + } + , + + { + "id": "aramis-lab/AD-DL" + } + , + + { + "id": "hpcaitech/ColossalAI" + } + , + + { + "id": "hpcaitech/ColossalAI" + } + , + + { + "id": "mosaicml/composer" + } + , + + { + "id": "facebookresearch/CrypTen" + } + , + + { + "id": "microsoft/DeepSpeed" + } + , + + { + "id": "thuml/depyf" + } + , + + { + "id": "facebookresearch/detectron2" + } + , + + { + "id": "determined-ai/determined" + } + , + + { + "id": "dmlc/dgl" + } + , + + { + "id": "huggingface/diffusers" + } + , + + { + "id": "mindee/doctr" + } + , + + { + "id": "arogozhnikov/einops" + } + , + + { + "id": "TorchEnsemble-Community/Ensemble-Pytorch" + } + , + + { + "id": "facebookresearch/fairscale" + } + , + + { + "id": "fastai/fastai" + } + , + + { + "id": "adap/flower" + } + , + + { + "id": "BiomedSciAI/fuse-med-ml" + } + , + + { + "id": "mlcommons/GaNDLF" + } + , + + { + "id": "pytorch/glow" + } + , + + { + "id": "cornellius-gp/gpytorch" + } + , + + { + "id": "facebookresearch/higher" + } + , + + { + "id": "horovod/horovod" + } + , + + { + "id": "microsoft/hummingbird" + } + , + + { + "id": "facebookresearch/hydra" + } + , + + { + "id": "intel/neural-compressor" + } + , + + { + "id": "intel/intel-extension-for-pytorch" + } + , + + { + "id": "unifyai/ivy" + } + , + + { + "id": "joeynmt/joeynmt" + } + , + + { + "id": "kornia/kornia" + } + , + + { + "id": "lyft/l5kit" + } + , + + { + "id": "lightly-ai/lightly" + } + , + + { + "id": "ludwig-ai/ludwig" + } + , + + { + "id": "facebookresearch/mmf" + } + , + + { + "id": "Project-MONAI/MONAI" + } + , + + { + "id": "NVIDIA/NeMo" + } + , + + { + "id": "octoml/octoml-profile" + } + , + + { + "id": "microsoft/onnxruntime" + } + , + + { + "id": "pytorch/opacus" + } + , + + { + "id": "open-compass/opencompass" + } + , + + { + "id": "optuna/optuna" + } + , + + { + "id": "lf1-io/padl" + } + , + + { + "id": "facebookresearch/ParlAI" + } + , + + { + "id": "PennyLaneAI/pennylane" + } + , + + { + "id": "pfnet/pfrl" + } + , + + { + "id": "polyaxon/polyaxon" + } + , + + { + "id": "jmschrei/pomegranate" + } + , + + { + "id": "graphcore/poptorch" + } + , + + { + "id": "GRAAL-Research/poutyne" + } + , + + { + "id": "pykale/pykale" + } + , + + { + "id": "pypose/pypose" + } + , + + { + "id": "WenjieDu/PyPOTS" + } + , + + { + "id": "pyro-ppl/pyro" + } + , + + { + "id": "pystiche/pystiche" + } + , + + { + "id": "OpenMined/PySyft" + } + , + + { + "id": "pyg-team/pytorch_geometric" + } + , + + { + "id": "PyTorchLightning/pytorch-lightning" + } + , + + { + "id": "KevinMusgrave/pytorch-metric-learning" + } + , + + { + "id": 
"PetrochukM/PyTorch-NLP" + } + , + + { + "id": "facebookresearch/pytorch3d" + } + , + + { + "id": "benedekrozemberczki/pytorch_geometric_temporal" + } + , + + { + "id": "pytorchfi/pytorchfi" + } + , + + { + "id": "facebookresearch/pytorchvideo" + } + , + + { + "id": "azavea/raster-vision" + } + , + + { + "id": "ray-project/ray" + } + , + + { + "id": "awslabs/renate" + } + , + + { + "id": "" + } + , + + { + "id": "IBM/simulai" + } + , + + { + "id": "skorch-dev/skorch" + } + , + + { + "id": "DLR-RM/stable-baselines3" + } + , + + { + "id": "fidelity/stoke" + } + , + + { + "id": "substra" + } + , + + { + "id": "tensorly/tensorly" + } + , + + { + "id": "airaria/TextBrewer" + } + , + + { + "id": "TissueImageAnalytics/tiatoolbox" + } + , + + { + "id": "yoshitomo-matsubara/torchdistill" + } + , + + { + "id": "TorchDrift/TorchDrift" + } + , + + { + "id": "DeepGraphLearning/torchdrug" + } + , + + { + "id": "microsoft/torchgeo" + } + , + + { + "id": "fepegar/torchio" + } + , + + { + "id": "PyTorchLightning/metrics" + } + , + + { + "id": "metaopt/TorchOpt" + } + , + + { + "id": "nicolas-chaulet/torch-points3d" + } + , + + { + "id": "mit-han-lab/torchquantum" + } + , + + { + "id": "allegroai/clearml" + } + , + + { + "id": "huggingface/transformers" + } + , + + { + "id": "NVIDIA/Torch-TensorRT" + } + , + + { + "id": "microsoft/Semi-supervised-learning" + } + , + + { + "id": "facebookresearch/vissl" + } + , + + { + "id": "vllm-project/vllm" + } + + ] } diff --git a/ecosystem/Captum/index.html b/ecosystem/Captum/index.html new file mode 100644 index 000000000000..31040df11839 --- /dev/null +++ b/ecosystem/Captum/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/Flair/index.html b/ecosystem/Flair/index.html new file mode 100644 index 000000000000..9f5fdc549820 --- /dev/null +++ b/ecosystem/Flair/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/Forte/index.html b/ecosystem/Forte/index.html new file mode 100644 index 000000000000..052928cbd601 --- /dev/null +++ b/ecosystem/Forte/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + forte | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ [Standard site chrome: conference banner, navigation, ecosystem card icons, and Docs / Tutorials / Resources footer]
+ forte
+ Forte is a toolkit for building NLP pipelines featuring composable components, convenient data interfaces, and cross-task interaction.
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/Ignite/index.html b/ecosystem/Ignite/index.html new file mode 100644 index 000000000000..8563e096fe40 --- /dev/null +++ b/ecosystem/Ignite/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/OpenMMLab/index.html b/ecosystem/OpenMMLab/index.html new file mode 100644 index 000000000000..2947557d7b0a --- /dev/null +++ b/ecosystem/OpenMMLab/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + OpenMMLab | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ [Standard site chrome: conference banner, navigation, ecosystem card icons, and Docs / Tutorials / Resources footer]
+ OpenMMLab
+ OpenMMLab covers a wide range of computer vision research topics including classification, detection, segmentation, and super-resolution.
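The OpenMMLab card above only states the scope of the toolboxes. As a purely illustrative sketch of what that looks like in practice, the snippet below runs single-image inference with MMDetection, one of the OpenMMLab projects; the config, checkpoint, and image paths are placeholders, and init_detector / inference_detector are the helpers MMDetection documents for this workflow.

# Hedged sketch: single-image inference with MMDetection (an OpenMMLab toolbox).
# All file paths are placeholders; nothing here comes from this website repository.
from mmdet.apis import init_detector, inference_detector

config_file = "configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py"  # placeholder config
checkpoint_file = "checkpoints/faster_rcnn_r50_fpn_1x_coco.pth"     # placeholder weights

model = init_detector(config_file, checkpoint_file, device="cpu")
result = inference_detector(model, "demo.jpg")                      # placeholder image
print(result)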
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/accelerate/index.html b/ecosystem/accelerate/index.html new file mode 100644 index 000000000000..e8c3d53d1e30 --- /dev/null +++ b/ecosystem/accelerate/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + accelerate | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ [Standard site chrome: conference banner, navigation, ecosystem card icons, and Docs / Tutorials / Resources footer]
+ accelerate
+ 🚀 A simple way to train and use PyTorch models with multi-GPU, TPU, mixed-precision
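The accelerate card above is only a one-line tagline. For context, here is a minimal sketch of the core pattern the library is known for: wrap the training objects with Accelerator.prepare() and call accelerator.backward() instead of loss.backward(). The toy model and data are stand-ins, not anything from this site.

# Minimal sketch of the Hugging Face Accelerate training pattern described above.
import torch
from accelerate import Accelerator

accelerator = Accelerator()  # picks up device / distributed setup from the environment

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataset = torch.utils.data.TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,)))
loader = torch.utils.data.DataLoader(dataset, batch_size=8)

# prepare() wraps model, optimizer, and dataloader for the current device/setup.
model, optimizer, loader = accelerator.prepare(model, optimizer, loader)

loss_fn = torch.nn.CrossEntropyLoss()
for inputs, targets in loader:
    optimizer.zero_grad()
    loss = loss_fn(model(inputs), targets)
    accelerator.backward(loss)  # replaces loss.backward()
    optimizer.step()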
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/adaptdl/index.html b/ecosystem/adaptdl/index.html new file mode 100644 index 000000000000..1d78d14a4a6a --- /dev/null +++ b/ecosystem/adaptdl/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + AdaptDL | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ [Standard site chrome: conference banner, navigation, ecosystem card icons, and Docs / Tutorials / Resources footer]
+ AdaptDL
+ AdaptDL is a resource-adaptive deep learning training and scheduling framework.
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/advertorch/index.html b/ecosystem/advertorch/index.html new file mode 100644 index 000000000000..8a53f2c06731 --- /dev/null +++ b/ecosystem/advertorch/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/albumentations/index.html b/ecosystem/albumentations/index.html new file mode 100644 index 000000000000..cb4fc8153c38 --- /dev/null +++ b/ecosystem/albumentations/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + Albumentations | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ [Standard site chrome: conference banner, navigation, ecosystem card icons, and Docs / Tutorials / Resources footer]
+ Albumentations
+ Fast and extensible image augmentation library for different CV tasks like classification, segmentation, object detection and pose estimation.
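Since the Albumentations card above only names the library, here is a minimal hedged sketch of its Compose-based pipeline API applied to a placeholder image array; the specific transforms chosen are arbitrary.

# Minimal sketch of an Albumentations augmentation pipeline (toy example, not from this site).
import numpy as np
import albumentations as A

transform = A.Compose([
    A.HorizontalFlip(p=0.5),
    A.RandomBrightnessContrast(p=0.2),
])

image = np.zeros((224, 224, 3), dtype=np.uint8)  # placeholder image
augmented = transform(image=image)["image"]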
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/allennlp/index.html b/ecosystem/allennlp/index.html new file mode 100644 index 000000000000..6c6822d4f35f --- /dev/null +++ b/ecosystem/allennlp/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/avalanche/index.html b/ecosystem/avalanche/index.html new file mode 100644 index 000000000000..d1753980bb60 --- /dev/null +++ b/ecosystem/avalanche/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + avalanche | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ [Standard site chrome: conference banner, navigation, ecosystem card icons, and Docs / Tutorials / Resources footer]
+ avalanche
+ Avalanche: an End-to-End Library for Continual Learning
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/baal/index.html b/ecosystem/baal/index.html new file mode 100644 index 000000000000..08436d7e3e35 --- /dev/null +++ b/ecosystem/baal/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + baal | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ [Standard site chrome: conference banner, navigation, ecosystem card icons, and Docs / Tutorials / Resources footer]
+ baal
+ baal (bayesian active learning) aims to implement active learning using metrics of uncertainty derived from approximations of bayesian posteriors in neural networks.
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/botorch/index.html b/ecosystem/botorch/index.html new file mode 100644 index 000000000000..c527581fae6a --- /dev/null +++ b/ecosystem/botorch/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/catalyst/index.html b/ecosystem/catalyst/index.html new file mode 100644 index 000000000000..944d9d69e8a6 --- /dev/null +++ b/ecosystem/catalyst/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + Catalyst | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ [Standard site chrome: conference banner, navigation, ecosystem card icons, and Docs / Tutorials / Resources footer]
+ Catalyst
+ Catalyst helps you write compact, but full-featured deep learning and reinforcement learning pipelines with a few lines of code.
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/clinicadl/index.html b/ecosystem/clinicadl/index.html new file mode 100644 index 000000000000..6df22277af9d --- /dev/null +++ b/ecosystem/clinicadl/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + ClinicaDL | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        ClinicaDL

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        Framework for reproducible classification of Alzheimer's Disease

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/colossal-llama-2/index.html b/ecosystem/colossal-llama-2/index.html new file mode 100644 index 000000000000..1872df666d01 --- /dev/null +++ b/ecosystem/colossal-llama-2/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + Colossal-LLaMA-2 | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        Colossal-LLaMA-2

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        A complete and open-sourced solution for injecting domain-specific knowledge into pre-trained LLM.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/colossal/index.html b/ecosystem/colossal/index.html new file mode 100644 index 000000000000..e7966a4fd8a9 --- /dev/null +++ b/ecosystem/colossal/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + ColossalAI | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        ColossalAI

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        Colossal-AI is a Unified Deep Learning System for Big Model Era

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/composer/index.html b/ecosystem/composer/index.html new file mode 100644 index 000000000000..b13e51e710ef --- /dev/null +++ b/ecosystem/composer/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + composer | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        composer

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        library of algorithms to speed up neural network training

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
diff --git a/ecosystem/contributor-awards-2023.html b/ecosystem/contributor-awards-2023.html
index c08a831ef719..ba3130663f1c 100644
--- a/ecosystem/contributor-awards-2023.html
+++ b/ecosystem/contributor-awards-2023.html
@@ -1,12 +1,118 @@
----
-layout: default
-title: Announcing the 2023 PyTorch Contributor Awards
-permalink: ecosystem/contributor-awards-2023
-background-class: ecosystem-background
-body-class: ecosystem
----
+ Announcing the 2023 PyTorch Contributor Awards | PyTorch
+ Announcing the 2023 PyTorch Contributor Awards
@@ -143,3 +249,170 @@
 PyTorch 2023 Nominees

diff --git a/ecosystem/contributor-awards-2024.html b/ecosystem/contributor-awards-2024.html
index 7360f40ef845..a4593ac2200f 100644
--- a/ecosystem/contributor-awards-2024.html
+++ b/ecosystem/contributor-awards-2024.html
@@ -1,12 +1,118 @@
----
-layout: default
-title: Announcing the 2024 PyTorch Contributor Awards
-permalink: ecosystem/contributor-awards-2024
-background-class: ecosystem-background
-body-class: ecosystem
----
+ Announcing the 2024 PyTorch Contributor Awards | PyTorch
+ Announcing the 2024 PyTorch Contributor Awards
@@ -128,3 +234,170 @@
 PyTorch 2024 Nominees
diff --git a/ecosystem/contributors.html b/ecosystem/contributors.html
deleted file mode 100644
index 13024213ed53..000000000000
--- a/ecosystem/contributors.html
+++ /dev/null
@@ -1,98 +0,0 @@
----
-layout: default
-title: Contributors
-permalink: /resources/contributors/
-body-class: ecosystem
-background-class: ecosystem-join-background
-redirect_to: "/newsletter"
----
-PyTorch Contributors
-The central place for PyTorch contributors to stay up-to-date with the codebase and discover notable RFCs, PRs and more.
-{% include past_issues.html %}
-Newsletter Sign Up
-Follow the contributors newsletter for curated news from across the PyTorch developer community
-View Issues
-Join the conversation
-Join the contributor's discussion forum to learn and collaborate on the latest development across PyTorch
-Contributor Forums

diff --git a/ecosystem/crypten/index.html b/ecosystem/crypten/index.html
new file mode 100644
index 000000000000..b5d204449807
--- /dev/null
+++ b/ecosystem/crypten/index.html
@@ -0,0 +1,11 @@
+ Redirecting

+ Click here if you are not redirected.

diff --git a/ecosystem/deepspeed/index.html b/ecosystem/deepspeed/index.html
new file mode 100644
index 000000000000..c2d5b58cd018
--- /dev/null
+++ b/ecosystem/deepspeed/index.html
@@ -0,0 +1,390 @@
+ DeepSpeed | PyTorch
+ DeepSpeed
+ DeepSpeed is a deep learning optimization library that makes distributed training easy, efficient, and effective.
diff --git a/ecosystem/depyf/index.html b/ecosystem/depyf/index.html
new file mode 100644
index 000000000000..4f17aaf889fe
--- /dev/null
+++ b/ecosystem/depyf/index.html
@@ -0,0 +1,390 @@
+ depyf | PyTorch
+ depyf
+ depyf is a tool to help users understand and adapt to PyTorch compiler torch.compile.

diff --git a/ecosystem/detectron2/index.html b/ecosystem/detectron2/index.html
new file mode 100644
index 000000000000..6099a125656a
--- /dev/null
+++ b/ecosystem/detectron2/index.html
@@ -0,0 +1,390 @@
+ Detectron2 | PyTorch
+ Detectron2
+ Detectron2 is FAIR's next-generation platform for object detection and segmentation.

diff --git a/ecosystem/determined/index.html b/ecosystem/determined/index.html
new file mode 100644
index 000000000000..0b96f6757b5e
--- /dev/null
+++ b/ecosystem/determined/index.html
@@ -0,0 +1,390 @@
+ Determined | PyTorch
+ Determined
+ Determined is a platform that helps deep learning teams train models more quickly, easily share GPU resources, and effectively collaborate.

diff --git a/ecosystem/dgl/index.html b/ecosystem/dgl/index.html
new file mode 100644
index 000000000000..ad9860d6ab12
--- /dev/null
+++ b/ecosystem/dgl/index.html
@@ -0,0 +1,390 @@
+ DGL | PyTorch
+ DGL
+ Deep Graph Library (DGL) is a Python package built for easy implementation of graph neural network model family, on top of PyTorch and other frameworks.

diff --git a/ecosystem/diffusers/index.html b/ecosystem/diffusers/index.html
new file mode 100644
index 000000000000..b2ebbb8cc747
--- /dev/null
+++ b/ecosystem/diffusers/index.html
@@ -0,0 +1,390 @@
+ Diffusers | PyTorch
+ Diffusers
+ Diffusers provides pretrained diffusion models across multiple modalities, such as vision and audio, and serves as a modular toolbox for inference and training of diffusion models.

diff --git a/ecosystem/doctr/index.html b/ecosystem/doctr/index.html
new file mode 100644
index 000000000000..87539dd9dfcd
--- /dev/null
+++ b/ecosystem/doctr/index.html
@@ -0,0 +1,390 @@
+ docTR | PyTorch
+ docTR
+ docTR (Document Text Recognition) - a seamless, high-performing & accessible library for OCR-related tasks powered by Deep Learning.
diff --git a/ecosystem/ecosystem.html b/ecosystem/ecosystem.html
deleted file mode 100644
index b60f4bc5efd2..000000000000
--- a/ecosystem/ecosystem.html
+++ /dev/null
@@ -1,143 +0,0 @@
----
-layout: default
-title: Ecosystem
-permalink: ecosystem/
-background-class: ecosystem-background
-body-class: ecosystem
-redirect_to: https://landscape.pytorch.org/
----
-Ecosystem
-Tools
-Tap into a rich ecosystem of tools, libraries, and more to support, accelerate, and explore AI development.
-Join the Ecosystem
-{% include ecosystem_sort.html %}
-{% assign ecosystem = site.ecosystem | sample: site.ecosystem.size %}
-{% for item in ecosystem %}
-{% endfor %}
-Have a project you want featured?
-Join the PyTorch ecosystem

diff --git a/ecosystem/einops/index.html b/ecosystem/einops/index.html
new file mode 100644
index 000000000000..05b721f237a3
--- /dev/null
+++ b/ecosystem/einops/index.html
@@ -0,0 +1,390 @@
+ einops | PyTorch
+ einops
+ Flexible and powerful tensor operations for readable and reliable code.
diff --git a/ecosystem/ensemble-pytorch/index.html b/ecosystem/ensemble-pytorch/index.html
new file mode 100644
index 000000000000..3c1654f10b91
--- /dev/null
+++ b/ecosystem/ensemble-pytorch/index.html
@@ -0,0 +1,390 @@
+ Ensemble-Pytorch | PyTorch
+ Ensemble-Pytorch
+ A unified ensemble framework for PyTorch to improve the performance and robustness of your deep learning model.

diff --git a/ecosystem/fairscale/index.html b/ecosystem/fairscale/index.html
new file mode 100644
index 000000000000..7cf40e1e58d0
--- /dev/null
+++ b/ecosystem/fairscale/index.html
@@ -0,0 +1,390 @@
+ FairScale | PyTorch
+ FairScale
+ FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes.

diff --git a/ecosystem/fastai/index.html b/ecosystem/fastai/index.html
new file mode 100644
index 000000000000..5bd21367f9ff
--- /dev/null
+++ b/ecosystem/fastai/index.html
@@ -0,0 +1,11 @@
+ Redirecting

+ Click here if you are not redirected.

diff --git a/ecosystem/flower/index.html b/ecosystem/flower/index.html
new file mode 100644
index 000000000000..5ddc67064be9
--- /dev/null
+++ b/ecosystem/flower/index.html
@@ -0,0 +1,390 @@
+ Flower | PyTorch
+ Flower
+ Flower - A Friendly Federated Learning Framework
diff --git a/ecosystem/fusemedml/index.html b/ecosystem/fusemedml/index.html
new file mode 100644
index 000000000000..994c0663ad1b
--- /dev/null
+++ b/ecosystem/fusemedml/index.html
@@ -0,0 +1,390 @@
+ FuseMedML | PyTorch
+ FuseMedML
+ FuseMedML is a python framework accelerating ML based discovery in the medical field by encouraging code reuse

diff --git a/ecosystem/gandlf/index.html b/ecosystem/gandlf/index.html
new file mode 100644
index 000000000000..30acd73a4355
--- /dev/null
+++ b/ecosystem/gandlf/index.html
@@ -0,0 +1,390 @@
+ GaNDLF | PyTorch
+ GaNDLF
+ A generalizable application framework for segmentation, regression, and classification using PyTorch

diff --git a/ecosystem/glow/index.html b/ecosystem/glow/index.html
new file mode 100644
index 000000000000..0908b1aea9fe
--- /dev/null
+++ b/ecosystem/glow/index.html
@@ -0,0 +1,11 @@
+ Redirecting

+ Click here if you are not redirected.

diff --git a/ecosystem/gpytorch/index.html b/ecosystem/gpytorch/index.html
new file mode 100644
index 000000000000..fdfc910bb3d4
--- /dev/null
+++ b/ecosystem/gpytorch/index.html
@@ -0,0 +1,11 @@
+ Redirecting

+ Click here if you are not redirected.

diff --git a/ecosystem/higher/index.html b/ecosystem/higher/index.html
new file mode 100644
index 000000000000..f713914052c7
--- /dev/null
+++ b/ecosystem/higher/index.html
@@ -0,0 +1,390 @@
+ higher | PyTorch
+ higher
+ higher is a library which facilitates the implementation of arbitrarily complex gradient-based meta-learning algorithms and nested optimisation loops with near-vanilla PyTorch.
diff --git a/ecosystem/horovod/index.html b/ecosystem/horovod/index.html
new file mode 100644
index 000000000000..4e2bfca1d1fa
--- /dev/null
+++ b/ecosystem/horovod/index.html
@@ -0,0 +1,11 @@
+ Redirecting

+ Click here if you are not redirected.

diff --git a/ecosystem/hummingbird/index.html b/ecosystem/hummingbird/index.html
new file mode 100644
index 000000000000..4afe60520698
--- /dev/null
+++ b/ecosystem/hummingbird/index.html
@@ -0,0 +1,390 @@
+ Hummingbird | PyTorch
+ Hummingbird
+ Hummingbird compiles trained ML models into tensor computation for faster inference.

diff --git a/ecosystem/hydra/index.html b/ecosystem/hydra/index.html
new file mode 100644
index 000000000000..0cefa9719607
--- /dev/null
+++ b/ecosystem/hydra/index.html
@@ -0,0 +1,390 @@
+ Hydra | PyTorch
+ Hydra
+ A framework for elegantly configuring complex applications.

diff --git a/ecosystem/inc/index.html b/ecosystem/inc/index.html
new file mode 100644
index 000000000000..3f92b1d0e986
--- /dev/null
+++ b/ecosystem/inc/index.html
@@ -0,0 +1,390 @@
+ neural-compressor | PyTorch
+ neural-compressor
+ Intel® Neural Compressor provides unified APIs for network compression technologies for faster inference
diff --git a/ecosystem/index.html b/ecosystem/index.html
new file mode 100644
index 000000000000..757af3e1be1b
--- /dev/null
+++ b/ecosystem/index.html
@@ -0,0 +1,11 @@
+ Redirecting

+ Click here if you are not redirected.

diff --git a/ecosystem/ipex/index.html b/ecosystem/ipex/index.html
new file mode 100644
index 000000000000..6ae9e601a8b6
--- /dev/null
+++ b/ecosystem/ipex/index.html
@@ -0,0 +1,390 @@
+ intel-extension-for-pytorch | PyTorch
+ intel-extension-for-pytorch
+ A Python package for improving PyTorch performance on Intel platforms

diff --git a/ecosystem/ivy/index.html b/ecosystem/ivy/index.html
new file mode 100644
index 000000000000..a4db13a43e71
--- /dev/null
+++ b/ecosystem/ivy/index.html
@@ -0,0 +1,390 @@
+ ivy | PyTorch
+ ivy
+ The Unified Machine Learning Framework

diff --git a/ecosystem/joeynmt/index.html b/ecosystem/joeynmt/index.html
new file mode 100644
index 000000000000..9d464eb100be
--- /dev/null
+++ b/ecosystem/joeynmt/index.html
@@ -0,0 +1,390 @@
+ joeynmt | PyTorch
+ joeynmt
+ Minimalist Neural Machine Translation toolkit for educational purposes
diff --git a/ecosystem/join.html b/ecosystem/join.html
index ce102e84e325..96432a4b1f3e 100644
--- a/ecosystem/join.html
+++ b/ecosystem/join.html
@@ -1,8 +1,11 @@
----
-layout: default
-title: Join
-permalink: ecosystem/join.html
-body-class: ecosystem
-background-class: ecosystem-join-background
-redirect_to: https://github.com/pytorch-fdn/ecosystem
----
+ Redirecting

+ Click here if you are not redirected.

diff --git a/ecosystem/kornia/index.html b/ecosystem/kornia/index.html
new file mode 100644
index 000000000000..5f190356fef9
--- /dev/null
+++ b/ecosystem/kornia/index.html
@@ -0,0 +1,390 @@
+ Kornia | PyTorch
+ Kornia
+ Kornia is a differentiable computer vision library that consists of a set of routines and differentiable modules to solve generic CV problems.

diff --git a/ecosystem/l5kit/index.html b/ecosystem/l5kit/index.html
new file mode 100644
index 000000000000..8e056a250012
--- /dev/null
+++ b/ecosystem/l5kit/index.html
@@ -0,0 +1,390 @@
+ L5Kit | PyTorch
+ L5Kit
+ ML Prediction, Planning and Simulation for Self-Driving built on PyTorch.

diff --git a/ecosystem/lightly/index.html b/ecosystem/lightly/index.html
new file mode 100644
index 000000000000..69981837af44
--- /dev/null
+++ b/ecosystem/lightly/index.html
@@ -0,0 +1,390 @@
+ Lightly | PyTorch
+ Lightly
+ Lightly is a computer vision framework for self-supervised learning.
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/ludwig/index.html b/ecosystem/ludwig/index.html new file mode 100644 index 000000000000..346efb005a2f --- /dev/null +++ b/ecosystem/ludwig/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + ludwig | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        ludwig

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        Data-centric declarative deep learning framework

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/mmf/index.html b/ecosystem/mmf/index.html new file mode 100644 index 000000000000..72c680c9cb2d --- /dev/null +++ b/ecosystem/mmf/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + MMF | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        MMF

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        A modular framework for vision & language multimodal research from Facebook AI Research (FAIR).

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/monai/index.html b/ecosystem/monai/index.html new file mode 100644 index 000000000000..564140fa474b --- /dev/null +++ b/ecosystem/monai/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + MONAI | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        MONAI

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        MONAI provides domain-optimized foundational capabilities for developing healthcare imaging training workflows.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/nemo/index.html b/ecosystem/nemo/index.html new file mode 100644 index 000000000000..490f6196f064 --- /dev/null +++ b/ecosystem/nemo/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + NeMo | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        NeMo

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        NeMo: a toolkit for conversational AI.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/octoml/index.html b/ecosystem/octoml/index.html new file mode 100644 index 000000000000..43ea4e7bf1a7 --- /dev/null +++ b/ecosystem/octoml/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + OctoML Profile | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        OctoML Profile

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

octoml-profile is a Python library and cloud service designed to provide a simple experience for assessing and optimizing the performance of PyTorch models.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/onnxrt/index.html b/ecosystem/onnxrt/index.html new file mode 100644 index 000000000000..8503babd39d5 --- /dev/null +++ b/ecosystem/onnxrt/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + ONNX Runtime | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        ONNX Runtime

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        ONNX Runtime is a cross-platform inferencing and training accelerator.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/opacus/index.html b/ecosystem/opacus/index.html new file mode 100644 index 000000000000..785e3fd09fe0 --- /dev/null +++ b/ecosystem/opacus/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + Opacus | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        Opacus

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        Train PyTorch models with Differential Privacy

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/opencompass/index.html b/ecosystem/opencompass/index.html new file mode 100644 index 000000000000..e3bf42ebdbfd --- /dev/null +++ b/ecosystem/opencompass/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + OpenCompass | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        OpenCompass

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

OpenCompass is an LLM evaluation platform supporting a wide range of models (Llama3, Mistral, InternLM2, GPT-4, Llama2, Qwen, GLM, Claude, etc.) across 100+ datasets.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/optuna/index.html b/ecosystem/optuna/index.html new file mode 100644 index 000000000000..bdf9fe68e7b9 --- /dev/null +++ b/ecosystem/optuna/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + Optuna | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        Optuna

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        An open source hyperparameter optimization framework to automate hyperparameter search.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/padl/index.html b/ecosystem/padl/index.html new file mode 100644 index 000000000000..c445c4011a78 --- /dev/null +++ b/ecosystem/padl/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + padl | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        padl

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        Pipeline Abstractions for Deep Learning in PyTorch

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/parlai/index.html b/ecosystem/parlai/index.html new file mode 100644 index 000000000000..cb4951482d8f --- /dev/null +++ b/ecosystem/parlai/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/pennylane/index.html b/ecosystem/pennylane/index.html new file mode 100644 index 000000000000..ba362b35d754 --- /dev/null +++ b/ecosystem/pennylane/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/pfrl/index.html b/ecosystem/pfrl/index.html new file mode 100644 index 000000000000..6fc6f07cae6d --- /dev/null +++ b/ecosystem/pfrl/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + PFRL | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        PFRL

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

PFRL is a deep reinforcement learning library that implements various state-of-the-art deep reinforcement learning algorithms in Python using PyTorch.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/polyaxon/index.html b/ecosystem/polyaxon/index.html new file mode 100644 index 000000000000..5e1993290f46 --- /dev/null +++ b/ecosystem/polyaxon/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + Polyaxon | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        Polyaxon

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        Polyaxon is a platform for building, training, and monitoring large-scale deep learning applications.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/pomegranate/index.html b/ecosystem/pomegranate/index.html new file mode 100644 index 000000000000..e8877ef89a8e --- /dev/null +++ b/ecosystem/pomegranate/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + pomegranate | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        pomegranate

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        pomegranate is a library of probabilistic models that is built in a modular manner and treats all models as the probability distributions that they are.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/poptorch/index.html b/ecosystem/poptorch/index.html new file mode 100644 index 000000000000..14f471fd3d17 --- /dev/null +++ b/ecosystem/poptorch/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + PopTorch | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        PopTorch

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        The PopTorch interface library is a simple wrapper for running PyTorch programs directly on Graphcore IPUs.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/poutyne/index.html b/ecosystem/poutyne/index.html new file mode 100644 index 000000000000..65cb503cbbe4 --- /dev/null +++ b/ecosystem/poutyne/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + Poutyne | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        Poutyne

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Poutyne is a Keras-like framework for PyTorch that handles much of the boilerplate code needed to train neural networks.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/ptc/2022.html b/ecosystem/ptc/2022.html index d4bfc82efa4e..19cc8d5af0e9 100644 --- a/ecosystem/ptc/2022.html +++ b/ecosystem/ptc/2022.html @@ -1,12 +1,118 @@ ---- -layout: default -title: PyTorch Conference 2022 -permalink: ecosystem/ptc/2022 -background-class: features-background -body-class: ecosystem ---- - -
        + + + + + + + + + + + + + PyTorch Conference 2022 | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +

        PyTorch Conference

        2022

        @@ -27,32 +133,760 @@

        2022


        Posters

        - {% for poster in site.data.ecosystem.ptc['2022'].posters %} + +
        +
        + + + +
        + + Enabling State-of-the-art Interpretability for Medical Imaging Using PyTorch + +
        +
        Dinkar Juyal, Syed Asher Javed, Harshith Padigela, Limin Yu, Aaditya Prakash, Logan Kilpatrick, Anand Sampat, PathAI
        +

PathAI is a Boston-based company focused on improving patient care using AI-powered pathology. We heavily use PyTorch for building our ML systems, specifically training and deploying models on large gigapixel pathology images. In this case study, we highlight our use of PyTorch to build, experiment with, and deploy Additive Multiple Instance Learning (MIL) models. Additive MIL is a novel MIL technique built using PyTorch Lightning which allows end-to-end learning from millions of pixels while providing granular interpretability of spatial heatmaps. These models allow for the exact computation of the extent to which each smaller region in the gigapixel-sized image contributes to the final model prediction. This enables class-wise excitatory and inhibitory contributions to be visualized on top of the pathology image. This informs practitioners of model failures and guides pathologists to areas of interest. All this is made possible by PyTorch's rapid research-to-prototype-to-deployment iteration cycle.

        + +

        + COMPUTER VISION +

        +
        +
        + +
        +
        + + + +
        + + TorchUnmix: Automatic Stain Unmixing and Augmentation for Histopathology Images in PyTorch + +
        +
        Erik Hagendorn
        +

        TorchUnmix is a library which aims to provide automatic stain unmixing and augmentation for histopathology whole slide images. Separation of histochemical stains (unmixing) is performed by orthonormal transformation of the RGB pixel data from predefined light absorption coefficients called stain vectors [1]. Precomputed publicly available stain vector definitions are often used, but inter-laboratory variation due to the histology and/or image acquisition process is common, yielding suboptimal unmixing results. Classical stain vector estimation methods rely on abundant distribution of stains, making them less practical for sparser distributions as observed from immunohistochemical stains. Geis et al. proposed a method based on k-means clustering of pixel values in the hue-saturation-density color space to determine optimal stain vectors which has been used in this work [2]. While stain vectors may be used for quantification of individual stains, TorchUnmix also provides functionalities to perform stain augmentation. Stain augmentation is a method used during the training process of deep learning models to improve generalization by unmixing the image, stochastically modifying the individual stains, and then compositing the stains into the final augmented image [3]. To our knowledge, no other libraries fully implement the above methods in PyTorch, utilizing GPU-acceleration. Additionally, TorchUnmix has extended all calculations used to perform the automatic stain unmixing and augmentation to operate on batches of images, drastically accelerating execution performance speeds in comparison to other libraries.
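For orientation, here is a generic, hedged sketch of the Ruifrok–Johnston-style color deconvolution the abstract describes (Beer–Lambert optical density projected onto a stain basis). This is not the TorchUnmix API; the function name and the H&E stain vectors below are illustrative only.

```python
import torch

def unmix_stains(rgb: torch.Tensor, stain_vectors: torch.Tensor) -> torch.Tensor:
    """Illustrative stain unmixing (not the TorchUnmix API).

    rgb:           (N, 3) float tensor with values in [0, 255]
    stain_vectors: (n_stains, 3) rows of light-absorption coefficients
    returns:       (N, n_stains) per-pixel stain concentrations
    """
    # Beer-Lambert: convert transmitted light to optical density (offset avoids log(0)).
    od = -torch.log10((rgb + 1.0) / 256.0)
    # Project optical density onto the stain basis via the pseudo-inverse.
    return od @ torch.linalg.pinv(stain_vectors)

# Commonly cited (illustrative) hematoxylin/eosin stain vectors.
he_stains = torch.tensor([[0.650, 0.704, 0.286],
                          [0.072, 0.990, 0.105]])
pixels = torch.randint(0, 256, (4, 3)).float()
concentrations = unmix_stains(pixels, he_stains)
```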

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + Scalable Training and Inference With Ray AIR + +
        +
        Kai Fricke, Balaji Veeramani
        +

Scaling machine learning is hard: Cloud platform solutions like SageMaker can limit flexibility, but a custom distributed framework is often too hard to implement. In effect, ML engineers struggle to scale their workloads from local prototyping to the cloud. + The Ray AI Runtime ('Ray AIR') is an integrated collection of machine learning libraries built around the distributed computing framework Ray. It provides an easy-to-use interface for scalable data processing, training, tuning, batch prediction, and online serving. Adapting existing PyTorch training loops to Ray AIR's PyTorch integration requires as few as 10 lines of code changes, and scaling from local development to the cloud needs no code changes at all.
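A minimal sketch of what wrapping an existing PyTorch loop might look like, assuming the Ray 2.x AIR entry points (TorchTrainer, ScalingConfig, prepare_model); the hyperparameters are placeholders.

```python
import torch
import torch.nn as nn
from ray.air.config import ScalingConfig
from ray.train.torch import TorchTrainer, prepare_model

def train_loop_per_worker(config):
    model = prepare_model(nn.Linear(10, 1))   # moves to the right device / wraps for DDP
    optimizer = torch.optim.SGD(model.parameters(), lr=config["lr"])
    for _ in range(config["epochs"]):
        x, y = torch.randn(32, 10), torch.randn(32, 1)
        loss = nn.functional.mse_loss(model(x), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

trainer = TorchTrainer(
    train_loop_per_worker,
    train_loop_config={"lr": 1e-2, "epochs": 3},
    scaling_config=ScalingConfig(num_workers=2, use_gpu=False),
)
result = trainer.fit()
```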

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + AutoMAD: Mixed Mode Autodiff for PyTorch Models + +
        +
        Jan HĂŒckelheim
        +

Mixed-mode autodiff combines back-propagation and forward differentiation. Both modes have pros and cons: Back-propagation is efficient for scalar functions with many trainable parameters, but it uses memory for intermediate results, requires data-flow reversal, and scales poorly for many output variables. Forward differentiation is straightforward to implement, memory-efficient, and easy to vectorize/parallelize or port to new hardware, but it scales poorly with a large number of trainable parameters. AutoMAD makes it possible to combine both modes: use forward differentiation for some layers while using back-prop for others.
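The two modes being combined can be illustrated with core PyTorch (this is not the AutoMAD API): forward mode pushes an input direction through with jvp, reverse mode pulls an output direction back with vjp.

```python
import torch
from torch.autograd.functional import jvp, vjp

def f(x):
    # A function with a vector output in R^3.
    return torch.stack([x.sum(), (x ** 2).sum(), x.prod()])

x = torch.randn(4)

# Forward mode: one pass per *input* direction (cheap when inputs are few, outputs many).
_, forward_dir_deriv = jvp(f, (x,), (torch.ones_like(x),))

# Reverse mode: one pass per *output* direction (cheap when inputs are many, outputs few).
_, reverse_vjp = vjp(f, (x,), torch.tensor([1.0, 0.0, 0.0]))  # returns a tuple matching the inputs
```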

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + xFormers: Building Blocks for Efficient Transformers + +
        +
        Daniel Haziza, Francisco Massa, Jeremy Reizenstein, Patrick Labatut, Diana Liskovich
        +

We present xFormers, a toolbox to accelerate research on Transformers. It contains efficient components, like an exact memory-efficient multi-head attention that can accelerate training by 2x while using a fraction of the memory. xFormers components are also customizable and can be combined to build variations of Transformers. Our hope is to enable the next generation of research based on Transformers.
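A hedged sketch of the memory-efficient attention component, assuming the xformers.ops.memory_efficient_attention entry point and a [batch, seq_len, heads, head_dim] layout; shapes and dtypes are illustrative.

```python
import torch
import xformers.ops as xops

B, M, H, K = 2, 1024, 8, 64
q = torch.randn(B, M, H, K, device="cuda", dtype=torch.float16)
k = torch.randn(B, M, H, K, device="cuda", dtype=torch.float16)
v = torch.randn(B, M, H, K, device="cuda", dtype=torch.float16)

# Exact attention computed without materializing the full (M x M) attention matrix.
out = xops.memory_efficient_attention(q, k, v, p=0.0)  # p is the dropout probability
```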

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + linear_operator - Structured Linear Algebra in PyTorch + +
        +
        Max Balandat
        +

        linear_operator (https://github.com/cornellius-gp/linear_operator) is a library for structured linear algebra built on PyTorch. It provides a LinearOperator class that represents a tensor that is never instantiated but is instead accessed through operations like matrix multiplication, solves, decompositions, and indexing. These objects use custom linear algebra operations that can exploit particular matrix structure (e.g. diagonal, block-diagonal, triangular, Kronecker, etc.) in computations in order to achieve substantial (many orders of magnitude) improvements in time and memory complexity. Moreover, many efficient linear algebra operations (e.g. solves, decompositions, indexing, etc.) can be automatically generated from the LinearOperator's matmul function. This makes it extremely easy to compose or implement custom LinearOperators. + The key aspect that makes linear_operator easy to use in PyTorch code is its integration with the `__torch_function__` interface - Common linear algebra operations (such as matrix multiplication, solve, SVD) are mapped to the respective torch functions (`__matmul__`, `torch.linalg.solve`, `torch.linalg.svd`), so that LinearOperator objects can be used as drop-in replacements for dense tensors even in existing code. LinearOperator operations themselves may return LinearOperator objects, automatically keeping track of algebraic structure after each computation. As a result, users never need to reason about what efficient linear algebra routines to use (so long as the input elements defined by the user encode known input structure).
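A short sketch of the idea under stated assumptions: I assume the library exposes a DiagLinearOperator class for diagonal structure, and that matmul and torch.linalg.solve dispatch to structure-aware routines via __torch_function__ as described above.

```python
import torch
from linear_operator.operators import DiagLinearOperator  # assumed class name

d = torch.rand(1000) + 0.1
A = DiagLinearOperator(d)          # the 1000 x 1000 matrix is never instantiated
b = torch.randn(1000)

y = A @ b                                     # O(n) matmul exploiting diagonal structure
x = torch.linalg.solve(A, b.unsqueeze(-1))    # dispatched to an O(n) structured solve
```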

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + Declarative Machine Learning with Ludwig: End-to-end Machine Learning Pipelines Using Simple and Flexible Data-driven Configurations + +
        +
        Justin Zhao
        +

Ludwig is a declarative machine learning framework that makes it easy to define and compare machine learning pipelines using a simple and flexible data-driven configuration system. The minimal configuration declares the input and output features with their respective data types. Users can specify additional parameters to preprocess, encode, and decode features, load from pre-trained models, compose the internal model architecture, set training parameters, or run hyperparameter optimization. Ludwig will build an end-to-end machine learning pipeline automatically, using whatever is explicitly specified in the configuration, while falling back to smart defaults for any parameters that are not. Scientists, engineers, and researchers use Ludwig to explore state-of-the-art model architectures, run hyperparameter search, and scale up to larger-than-memory datasets and multi-node clusters, on a variety of problems using structured and unstructured features. Ludwig has 8.5K+ stars on GitHub and is built on top of PyTorch, Horovod, and Ray.
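A minimal declarative-config sketch, assuming the LudwigModel Python API and recent config keys; "train.csv" and its `text`/`label` columns are hypothetical.

```python
from ludwig.api import LudwigModel

config = {
    "input_features": [{"name": "text", "type": "text"}],
    "output_features": [{"name": "label", "type": "category"}],
    "trainer": {"epochs": 3},
}

model = LudwigModel(config)
train_stats, _, _ = model.train(dataset="train.csv")   # Ludwig builds the full pipeline
predictions, _ = model.predict(dataset="train.csv")
```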

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + Generalized Shapes: Block Sparsity, MaskedTensor, NestedTensor + +
        +
        Christian Puhrsch
        +

        This poster presents an overview of available and ongoing developments related to sparse memory formats, masked computation, and support for collections of variably shaped data. In particular it contains a case study of block sparse memory formats, MaskedTensor, and NestedTensor.
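For the variably shaped data case, here is an illustrative use of the (prototype-era) nested tensor API; the ops shown are limited to construction, unbinding, and padding to keep the sketch within what the prototype supports.

```python
import torch

# Two "sentences" of different lengths, each with a 5-dim embedding.
nt = torch.nested.nested_tensor([torch.randn(3, 5), torch.randn(7, 5)])

# The ragged constituents can be recovered without copying the data into a dense batch...
parts = nt.unbind()

# ...and padding is applied only when a regular dense tensor is actually required.
padded = torch.nested.to_padded_tensor(nt, padding=0.0)  # shape (2, 7, 5)
```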

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + Betty: An Automatic Differentiation Library for Generalized Meta Learning + +
        +
        Sang Keun Choe
        +

        Betty is a simple, scalable and modular library for generalized meta-learning (GML) and multilevel optimization (MLO), built upon PyTorch, that allows a unified programming interface for a number of GML/MLO applications including few-shot learning, hyperparameter optimization, neural architecture search, data reweighting, and many more. The internal autodiff mechanism and the software design of Betty are developed by the novel interpretation of GML/MLO as a dataflow graph.

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + Functorch: Composable Function Transforms in Pytorch + +
        +
        Samantha Andow, Richard Zhou, Horace He, Animesh Jain
        +

Inspired by Google JAX, functorch is a library in PyTorch that offers composable vmap (vectorization) and autodiff transforms (grad, vjp, jvp). Since its first release alongside PyTorch 1.11, combining these transforms has helped users develop and explore new techniques that were previously tricky to write in PyTorch, like Neural Tangent Kernels and non-linear optimizations (see Theseus, also from PyTorch). This poster will go through some basic usage and highlight some research that leverages functorch.
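A small example of the composability described above: per-sample gradients by composing grad with vmap (in PyTorch 2.x the same transforms live under torch.func).

```python
import torch
from functorch import grad, vmap

def loss(w, x, y):
    return (x.dot(w) - y) ** 2

w = torch.randn(3)
xs, ys = torch.randn(8, 3), torch.randn(8)

# grad(loss) differentiates w.r.t. the first argument; vmap maps it over the batch,
# producing 8 per-sample gradients in a single vectorized call.
per_sample_grads = vmap(grad(loss), in_dims=(None, 0, 0))(w, xs, ys)
print(per_sample_grads.shape)  # torch.Size([8, 3])
```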

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + Large-Scale Neural Solvers for Partial Differential Equations + +
        +
        Patrick Stiller, Jeyhun Rustamov, Friedrich Bethke, Maksim Zhdanov, Raj Sutarya, Mahnoor Tanveer, Karan Shah, Richard Pausch, Sunna Torge, Alexander Debus, Attila Cangi, Peter Steinbach, Michael Bussmann, Nico Hoffmann
        +

Our open-source Neural Solvers framework provides data-free ML-based solvers for the study and analysis of phenomena in natural sciences built on top of PyTorch. We were the first to show that certain quantum systems modeled by the 2d Schrödinger equation can be accurately solved while retaining strong scaling. We also developed a novel neural network architecture, GatedPINN [1], introducing adaptable domain decomposition into the training of Physics-informed Neural Networks based on the Mixture-of-Experts paradigm. Distributed large-scale training of our GatedPINN is facilitated by Horovod, resulting in excellent GPU utilization and making Neural Solvers ready for the upcoming exascale era. Upcoming projects involve higher-dimensional problems such as 3d laser systems and coupled models to study the Vlasov-Maxwell system. Further experiments on novel, highly scalable compute hardware pave the way for applications of high-fidelity Neural Solvers to real-world problems such as inverse scattering.
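To make the "data-free" idea concrete, here is a generic physics-informed-loss sketch (not the GatedPINN code): the network is penalized on the residual of a toy equation u'(x) = cos(x) at collocation points, with no labeled data.

```python
import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(1, 64), nn.Tanh(), nn.Linear(64, 1))
x = torch.linspace(0.0, 3.14, 128).unsqueeze(-1).requires_grad_(True)

u = net(x)
du_dx, = torch.autograd.grad(u, x, grad_outputs=torch.ones_like(u), create_graph=True)

residual = du_dx - torch.cos(x)                 # ODE/PDE residual at collocation points
boundary = (net(torch.zeros(1, 1)) - 0.0) ** 2  # boundary condition u(0) = 0
loss = residual.pow(2).mean() + boundary.mean()
loss.backward()
```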

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + PyTorch Video: A Deep Learning Library for Video Understanding + +
        +
        Haoqi Fan
        +

PyTorchVideo is a deep learning library for video understanding research in PyTorch. +

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + Model Preparation Federated Learning and Device Computation + +
        +
        Zhihan Fang
        +

        Federated Learning with Differential Privacy has witnessed an increased adoption as one of the most promising ways to train machine learning models while preserving user privacy. Existing models in Meta around people attributes are mostly built on traditional centralized machine learning methods. Recently, due to the increasing concerns about user privacy internally and externally, Machine Learning teams at Meta are experiencing either signal loss or restriction on applying new features in models to further improve model performance. In this paper, we are introducing a generic framework we built for preparing and generating models for federated learning. The model preparation process is to utilize traditional machine learning to understand model structure and hyperparameters for the target problems including training, inference, evaluations. It also requires a simulation process to train the target model structure and understand the simulated environment on the server side to tune FL specific hyperparameters. + The model generation process is to generate device compatible models, which can be used directly on users’ devices for federated learning. We applied the FL framework on our on-device models, and integrated with device signals to improve user experience and protect user privacy.

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + Constrained Optimization in PyTorch With Cooper + +
        +
        Jose Gallego-Posada, Juan Camilo Ramirez
        +

        Cooper (https://github.com/cooper-org/cooper) is a general-purpose, deep learning-first constrained optimization library in PyTorch. Cooper is (almost!) seamlessly integrated with PyTorch and preserves the usual loss backward step workflow. If you are already familiar with PyTorch, using Cooper will be a breeze! + This library aims to encourage and facilitate the study of constrained optimization problems in deep learning. Cooper focuses on non-convex constrained optimization problems for which the loss or constraints are not necessarily “nicely behaved” or “theoretically tractable”. Moreover, Cooper has been designed to play nicely with mini-batched/stochastic estimates for the objective and constraint functions. + Cooper implements several popular constrained optimization protocols so you can focus on your project, while we handle the nitty-gritty behind the scenes.

        + +

        + https://github.com/cooper-org/cooper +

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + Two Dimensional Parallelism Using Distributed Tensors + +
        +
        Wanchao Liang, Junjie Wang
        +

        This talk will introduce 2-dimensional parallelism with PyTorch (Data Parallelism + Tensor Parallelism) using Distributed Tensor, a fundamental distributed primitive offered by PyTorch Distributed that empowers Tensor Parallelism. We have proven that using FSDP + Tensor Parallelism together could enable us to train large models like Transformer, and increase training performance. We offer end to end training techniques that enable you to train models in 2-D parallelism fashion, and checkpoint save/load models in a distributed manner.

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + PyTorch Tabular: A Framework for Deep Learning with Tabular Data + +
        +
        Manu Joseph
        +

In spite of showing unreasonable effectiveness in modalities like text and images, deep learning has always lagged behind gradient boosting on tabular data, both in popularity and performance. But recently there have been newer models created specifically for tabular data, which are pushing the performance bar. Popularity is still a challenge, however, because there is no easy, ready-to-use library like scikit-learn for deep learning. PyTorch Tabular aims to change that by being an easy-to-use and flexible framework which makes using SOTA model architectures on tabular data as easy as scikit-learn.

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + Better Transformer: Accelerating Transformer Inference in PyTorch + +
        +
        Michael Gschwind, Christian Puhrsch, Driss Guessous, Rui Zhu, Daniel Haziza, Francisco Massa
        +

        We introduce Better Transformer, the PyTorch project to accelerate Transformers for inference and training with out-of-the-box enablement by implementing the Better Transformer ‘fastpath’. Fastpath accelerates many of the most commonly executed functions in Transformer models. Starting with PyTorch 1.13, the PyTorch Core API is implemented with accelerated operations to deliver up to 2x-4x speedups on many Transformer models, such as BERT and XLM-R. Accelerated operations are based on (1) operator and kernel fusion and (2) exploiting sparsity created by variable sequence-length NLP batches. In addition to improving MultiHeadAttention with fastpath, the model also includes sparsity support for MultiHeadAttention and TransformerEncoder modules to take advantage of variable sequence-length information with Nested Tensors for NLP models. + At present, we enable torchtext and Hugging Face domain libraries with Better Transformer, delivering significant speedups for text, image, and audio models. Starting with the next release, PyTorch core will include even faster fused kernels and training support. You can preview these features today with PyTorch Nightlies, the nightly preview builds of the upcoming PyTorch release.
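A hedged sketch of the out-of-the-box fastpath described above: in recent releases, nn.TransformerEncoder can take the accelerated path during inference, and padding masks expose the sequence-length sparsity it exploits (exact conditions under which the fastpath triggers vary by version).

```python
import torch
import torch.nn as nn

layer = nn.TransformerEncoderLayer(d_model=256, nhead=8, batch_first=True)
encoder = nn.TransformerEncoder(layer, num_layers=6, enable_nested_tensor=True).eval()

src = torch.randn(32, 128, 256)                        # (batch, seq, features)
padding_mask = torch.zeros(32, 128, dtype=torch.bool)  # True marks padded positions
padding_mask[:, 100:] = True                           # variable-length batch

with torch.inference_mode():
    out = encoder(src, src_key_padding_mask=padding_mask)
```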

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + PiPPy: Automated Pipeline Parallelism for PyTorch + +
        +
        Ke Wen, Pavel Belevich, Anjali Sridhar
        +

        PiPPy is a library that provides automated pipeline parallelism for PyTorch models. With compiler techniques, PiPPy splits a model into pipeline stages without requiring model changes. PiPPy also provides a distributed runtime that distributes the split stages to multiple devices and hosts and orchestrates micro-batch execution in an overlapped fashion. We demonstrate application of PiPPy to Hugging Face models achieving 3x speedup on cloud platforms.

        + +

        + LIBRARIES +

        +
        +
        + +
        +
        + + + +
        + + Practical Guide on PyTorch Inference Using AWS Inferentia + +
        +
        Keita Watanabe
        +

        In this session we will go through step-by-step how to conduct the inference process of machine learning models using Inferentia. In addition, we compare the inference performance with GPU and discuss the cost advantage. In the later part of the session, we will also cover model deployment on Kubernetes.
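A hedged sketch of the compile-then-infer flow with the AWS Neuron SDK, assuming the torch.neuron.trace API from the torch-neuron package; it must run on (or target) an Inferentia-equipped instance, and the model choice is illustrative.

```python
import torch
import torch_neuron  # registers the torch.neuron namespace (assumed package)
from torchvision.models import resnet50

model = resnet50().eval()
example = torch.rand(1, 3, 224, 224)

# Ahead-of-time compile the model for Inferentia NeuronCores.
model_neuron = torch.neuron.trace(model, example_inputs=[example])
model_neuron.save("resnet50_neuron.pt")

# At serving time, the compiled artifact loads like any TorchScript module.
loaded = torch.jit.load("resnet50_neuron.pt")
output = loaded(example)
```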

        + +

        + OPTIMIZATION +

        +
        +
        + +
        +
        + + + +
        + + PyG Performance Optimization for CPU + +
        +
        Mingfei Ma
        +

Accelerating PyG CPU performance with faster sparse aggregation. + PyG is a library built upon PyTorch to easily write and train Graph Neural Networks, which relies heavily on the mechanism of Message Passing for information aggregation. We have optimized critical bottlenecks of Message Passing from PyTorch, including: 1. Scatter Reduce: maps to the classic PyG use case where the EdgeIndex is stored in COO memory format. 2. SpMM Reduce: maps to the use case where the EdgeIndex is stored in CSR memory format.
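As a standalone illustration of the scatter-reduce pattern behind message passing (plain PyTorch, not the PyG internals): sum incoming edge messages into their destination nodes.

```python
import torch

num_nodes = 4
edge_dst = torch.tensor([0, 0, 1, 3, 3, 3])   # destination node of each edge (COO-style)
messages = torch.randn(6, 16)                  # one 16-dim message per edge

out = torch.zeros(num_nodes, 16)
# Aggregate messages per destination node; "sum" could also be "mean", "amax", etc.
out.scatter_reduce_(0, edge_dst.unsqueeze(-1).expand_as(messages), messages,
                    reduce="sum", include_self=True)
```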

        + +

        + OPTIMIZATION +

        +
        +
        + +
        +
        + + + +
        + + Quantization in PyTorch 2.0 Export + +
        +
        Jerry Zhang
        +

        Currently, PyTorch Architecture Optimization (torch.ao) offers two quantization flow tools: eager mode quantization (beta) and fx graph mode quantization (prototype). With PyTorch 2.0 coming up, we are going to redesign quantization on top of the PyTorch 2.0 export path, this talk will introduce our plans for supporting quantization in PyTorch 2.0 export path, its main advantages over the previous tools, and how modeling developers and backend developers will be interacting with this flow.
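For context, here is a sketch of the existing FX graph mode flow that the PyTorch 2.0 export path is intended to succeed (APIs from torch.ao.quantization; the model and calibration data are placeholders).

```python
import torch
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx

model = torch.nn.Sequential(torch.nn.Linear(16, 32), torch.nn.ReLU()).eval()
example_inputs = (torch.randn(1, 16),)

qconfig_mapping = get_default_qconfig_mapping("fbgemm")
prepared = prepare_fx(model, qconfig_mapping, example_inputs)  # insert observers

with torch.no_grad():          # calibrate with representative data
    for _ in range(8):
        prepared(torch.randn(1, 16))

quantized = convert_fx(prepared)   # lower to int8 ops
```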

        + +

        + OPTIMIZATION +

        +
        +
        + +
        +
        + + + +
        + + Torch-TensorRT: A Compiler for Accelerating PyTorch Inference Using TensorRT + +
        +
        Naren Dasan, Dheeraj Peri, Bo Wang, Apurba Bose, George Stefanakis, Nick Comly, Wei Wei, Shirong Wu, Yinghai Lu
        +

        Torch-TensorRT is an open-source compiler targeting NVIDIA GPUs for high-performance deep-learning inference in PyTorch. It combines the usability of PyTorch with the performance of TensorRT allowing for easy optimization of inference workloads on NVIDIA GPUs. Torch-TensorRT supports all classes of optimizations in TensorRT including reduced mixed precision down to INT8, through simple Python & C++ APIs designed to work directly from PyTorch. Torch-TensorRT outputs standard PyTorch modules as well as the TorchScript format to allow for a completely self-contained, portable, & static module with TensorRT engines embedded. We present recent improvements to Torch-TensorRT including the new FX frontend which allows developers to use a full Python workflow for optimizing models and extend Torch-TensorRT in Python, the unified Torch-TensorRT Runtime which enables hybrid FX + TorchScript workflows and discuss future work for the project.
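A hedged sketch of compiling a model with Torch-TensorRT, assuming the torch_tensorrt.compile / torch_tensorrt.Input entry points; the model, shape, and FP16 setting are illustrative and require an NVIDIA GPU.

```python
import torch
import torch_tensorrt
from torchvision.models import resnet18

model = resnet18().eval().cuda()

trt_model = torch_tensorrt.compile(
    model,
    inputs=[torch_tensorrt.Input((1, 3, 224, 224), dtype=torch.half)],
    enabled_precisions={torch.half},   # allow FP16 kernels in addition to FP32
)

with torch.inference_mode():
    out = trt_model(torch.randn(1, 3, 224, 224, dtype=torch.half, device="cuda"))
```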

        + +

        + OPTIMIZATION +

        +
        +
        + +
        +
        + + + +
        + + Accelerating Inference with PyTorch by Leveraging Graph Fusions With oneDNN Graph + +
        +
        Sanchit Jain
        +

        The open-source oneDNN Graph library extends oneDNN with a flexible graph API to maximize the optimization opportunities for generating efficient code on AI hardware (currently x86-64 CPUs, but GPU support is on the way). It automatically identifies the graph partitions to be accelerated via fusion. Its fusion patterns entail fusing compute-intensive operations such as convolution, matmul and their neighbor operations for both inference and training use cases. Since PyTorch 1.12, oneDNN Graph has been supported as an experimental feature to speed up inference with Float32 datatype on x86-64 CPUs. Support for inference with oneDNN Graph using BFloat16 datatype exists in the PyTorch master branch, and hence also in nightly PyTorch releases. Intel Extension for PyTorch is an open-source library that builds on top of PyTorch, and can be thought of as a 'staging-ground' for optimizations in PyTorch from Intel. It leverages oneDNN Graph for inference with int8 datatype. This poster presents reproducible results with PyTorch’s TorchBench benchmarking suite to demonstrate the inference speedup achieved with PyTorch & oneDNN Graph using Float32, BFloat16 & int8 datatypes.
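A minimal sketch of enabling the experimental oneDNN Graph fusion for Float32 TorchScript inference on x86-64 CPUs, following the flow described above (model choice is illustrative).

```python
import torch
import torchvision.models as models

torch.jit.enable_onednn_fusion(True)   # turn on oneDNN Graph fusion for JIT inference

model = models.resnet50().eval()
example = torch.rand(32, 3, 224, 224)

with torch.no_grad():
    traced = torch.jit.trace(model, example)
    frozen = torch.jit.freeze(traced)
    frozen(example)        # warm-up runs trigger graph partitioning and fusion
    frozen(example)
    out = frozen(example)
```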

        + +

        + OPTIMIZATION +

        +
        +
        + +
        +
        + + + +
        + + Back to Python: Extending PyTorch Without Touching C++ + +
        +
        Alban Desmaison
        +

        This poster presents the new extension points that the PyTorch team has designed to allow users to extend PyTorch from Python. We will cover an introduction to Tensor Subclassing, Modes and torch library. We will briefly describe each extension point and talk through examples such as memory profiling, logging used operators, quantization and custom sparse kernel all in less than 100 LOC. We will also introduce the new ways you can add new devices and author kernels without the need to modify PyTorch directly.
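In the spirit of the "logging used operators" example mentioned above, here is a tiny Tensor subclass using the documented __torch_function__ extension point (the class itself is just an illustration, not code from the poster).

```python
import torch

class LoggingTensor(torch.Tensor):
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        print(f"called: {func.__name__}")          # log every operator that touches us
        return super().__torch_function__(func, types, args, kwargs)

x = torch.randn(3).as_subclass(LoggingTensor)
y = (x * 2).sum()   # prints "called: mul", "called: sum" (plus any internal ops)
```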

        + +

        + OTHER +

        +
        +
        +
        - {% if poster.poster_link %} - - {% endif %} +
        - {% if poster.poster_link %} - {{ poster.title }} - {% else %} {{ poster.title }} {% endif %} + + Functionalization in PyTorch +
        -
        {{ poster.authors | join: ", "}}
        -

        {{ poster.description }}

        - {% if poster.link %} +
        Brian Hirsh
        +

        Functionalization is a way to remove mutations from arbitrary PyTorch programs sent to downstream compilers. The PyTorch 2.0 stack is all about capturing graphs of PyTorch operations and sending them off to a compiler to get better performance. PyTorch programs can mutate and alias state, making them unfriendly to compilers. Functionalization is a technique to take a program full of PyTorch operators, including mutable and aliasing operators, and remove all mutations from the program while preserving semantics.
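A short sketch of functionalization removing in-place ops, assuming the torch.func.functionalize and make_fx entry points available in recent releases (earlier versions exposed functionalize under functorch.experimental).

```python
import torch
from torch.func import functionalize
from torch.fx.experimental.proxy_tensor import make_fx

def f(x):
    y = x.clone()
    y.add_(1)          # a mutation...
    return y.mul_(2)   # ...and another one

gm = make_fx(functionalize(f))(torch.randn(3))
print(gm.code)   # the traced graph contains only functional ops (add, mul), no mutations
```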

        +

        - {{ poster.link }} + OTHER

        - {% endif %} +
        +
        + +
        +
        + + + +
        + + Walmart Search: Serving Models at a Scale on TorchServe + +
        +
        Pankaj Takawale, Dagshayani Kamalaharan, Zbigniew Gasiorek, Rahul Sharnagat
        +

Walmart Search has embarked on the journey of adopting Deep Learning in the Search ecosystem to improve Search relevance in various areas. As our pilot use case, we wanted to serve the computationally intensive BERT Base model at runtime with the objective of achieving low latency and high throughput. We had JVM-hosted web applications loading and serving multiple models, with experimental models being loaded onto the same applications. These models are large in size and computation is expensive. + We were facing the following limitations with this approach: refreshing a model with the latest version or adding a new experimental model required an application deployment; increased memory pressure on a single application; slow startup time due to loading multiple ML models during startup; and concurrency was not beneficial due to limited CPU (metrics on concurrent vs. sequential model prediction).

        + +

        + OTHER +

        +
        +
        + +
        +
        + + + +
        + + TorchX: From Local Development to Kubernetes and Back + +
        +
        Joe Doliner, Jimmy Whitaker
        +

TorchX is incredibly useful for developing PyTorch applications quickly. But when it comes to deployment, nothing is easy. With Docker development, Kubernetes, and custom schedulers, there’s a lot to learn. In this talk, we’ll discuss how organizations can deploy to production, why TorchX is a great system for this, and the lessons we learned so you can avoid the same pitfalls.

        + +

        + PRODUCTION +

        +
        +
        + +
        +
        + + + +
        + + Training at Scale Using Fully Sharded Data Parallel (FSDP) with PyTorch/XLA + +
        +
        Shauheen Zahirazami, Jack Cao, Blake Hechtman, Alex Wertheim, Ronghang Hu
        +

PyTorch/XLA enables PyTorch users to run their models on XLA devices, including Google's Cloud TPUs. The latest improvements in PyTorch/XLA make it possible to train very large PyTorch models using FSDP. In this work we present benchmarks and Hardware FLOPs Utilization for training HuggingFace GPT-2 on Cloud TPU v4.

        +

        - {{ poster.categories }} + PRODUCTION

        - {% endfor %} + +
        +
        + + + +
        + + FSDP Production Readiness + +
        +
        Rohan Varma, Andrew Gu
        +

        This talk dives into recent advances in PyTorch Fully Sharded Data Parallel (FSDP) that have enabled better throughput, memory savings, and extensibility. These improvements have unblocked using FSDP for models of different modalities and for varying model and data sizes. We will share best practices to apply these features to specific use cases such as XLMR, FLAVA, ViT, DHEN, and GPT3-style models.
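A minimal FSDP wrapping sketch with core PyTorch APIs (intended to run under torchrun on a multi-GPU host; the Transformer model and hyperparameters are placeholders, and production use would typically add an auto-wrap policy and mixed precision).

```python
import os
import torch
import torch.distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

dist.init_process_group("nccl")
torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))

model = torch.nn.Transformer(d_model=512, nhead=8).cuda()
model = FSDP(model)   # parameters, gradients, and optimizer state are sharded across ranks

optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
src = torch.randn(10, 4, 512, device="cuda")
tgt = torch.randn(12, 4, 512, device="cuda")

loss = model(src, tgt).sum()
loss.backward()
optimizer.step()
```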

        + +

        + PRODUCTION +

        +
        +
        + +
        +
        + + + +
        + + Orchestrating Pytorch Workflows With Kubeflow Pipelines and TorchX + +
        +
        Erwin Huizenga, Nikita Namjoshi
        +

        TorchX is a universal job launcher for PyTorch applications that helps ML practitioners speed up iteration time and support end to end production. In this talk, we show you how to build and run TorchX components as a pipeline using the Kubeflow Pipeline (KFL) DSL. We go into detail on how to use KFP and TorchX to build components and how to use KFP DSL to orchestrate and run ML workflows.

        + +

        + PRODUCTION +

        +
        +
        + +
        +
        + + + +
        + + A Community- led and OSS Ecosystem of ML Compiler and Infrastructure Projects + +
        +
        Shauheen Zahirazami, James Rubin, Mehdi Amini, Thea Lamkin, Eugene Burmako, Navid Khajouei
        +

        ML development is often stymied by incompatibilities between frameworks and hardware, forcing developers to compromise on technologies when building ML solutions. OpenXLA is a community-led and open-source ecosystem of ML compiler and infrastructure projects being co-developed by AI/ML leaders including Alibaba, Amazon Web Services, AMD, Arm, Apple, Google, Intel, Meta, NVIDIA, and more. It will address this challenge by letting ML developers build their models on leading frameworks and execute them with high performance across any hardware backend. This flexibility will let developers make the right choice for their project, rather than being locked into decisions by closed systems. Our community will start by collaboratively evolving the XLA compiler and StableHLO, a portable ML compute operation set that makes frameworks easier to deploy across different hardware options.

        + +

        + PRODUCTION +

        +
        +
        + +
        +
        + + + +
        + + Squeezing GPU Memory Usage in PyTorch + +
        +
        Mao Lin, Keren Zhou, Penfei Su
        +

        The limited GPU memory resources can often hinder the performance of GPU-accelerated applications. While PyTorch’s Caching Allocator aims to minimize the number of expensive memory allocations and deallocations and maximize the efficient utilization of GPU memory resources, our study of common deep learning models revealed significant memory fragmentation problems. In some cases, up to 50% of GPU memory is wasted. To better understand the root causes of memory fragmentation, we developed a tool that visualizes GPU memory usage in two ways: the allocator view and the block view. The allocator view presents memory usage with each allocation or deallocation event, and the block view shows the changes in specific memory blocks over time. Our analysis revealed the considerable potential to save GPU memory, which would relieve the bottleneck of limited resources. By employing strategies such as swapping, activation recomputation, and memory defragmentation, we were able to reduce GPU memory waste significantly.
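Two of the memory-saving levers mentioned above can be exercised with core PyTorch APIs alone: inspecting the caching allocator's state and recomputing activations instead of storing them (the module and sizes below are illustrative).

```python
import torch
from torch.utils.checkpoint import checkpoint

block = torch.nn.Sequential(torch.nn.Linear(4096, 4096), torch.nn.ReLU()).cuda()
x = torch.randn(64, 4096, device="cuda", requires_grad=True)

# Activation recomputation: the block's intermediates are re-materialized in backward.
y = checkpoint(block, x, use_reentrant=False)
y.sum().backward()

print(torch.cuda.memory_allocated())     # bytes currently held by live tensors
print(torch.cuda.memory_reserved())      # bytes held by the caching allocator
print(torch.cuda.memory_summary())       # per-pool breakdown, useful for spotting fragmentation
snapshot = torch.cuda.memory_snapshot()  # per-block records, similar to the "block view" above
```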

        + +

        + TOOLS +

        +
        +
        + +
        +
        + + + +
        + + 'Brainchop': In Browser MRI Volumetric Segmentation and Rendering + +
        +
        Mohamed Masoud, Farfalla Hu, Sergey Plis
        +

        In brainchop project, we bring high fidelity pre-trained deep learning models for volumetric analysis of structural magnetic resonance imaging (MRI) right to the browsers of scientists and clinicians with no requirement on their technical skills in setting up AI-solutions. All of this in an extensible open-source framework. Our tool is the first front-end MRI segmentation tool on the web that supports full brain volumetric processing in a single pass inside a browser. This property is powered by our lightweight and reliable deep learning model Meshnet that enables volumetric processing of the entire brain at once, which leads to increased accuracy with modest computational requirements. High-quality client-side processing solves the privacy problem, as the data does not need to leave the client. Moreover, browser-based implementation is able to take advantage of available hardware acceleration regardless of the brand or architecture. + GitHub: https://github.com/neuroneural/brainchop

        + +

        + https://github.com/neuroneural/brainchop +

        + +

        + TOOLS +

        +
        +
        + +
        +
        + + + +
        + + TorchBench: Quantifying PyTorch Performance During the Development Loop + +
        +
        Xu Zhao, Will Constable, David Berard, Taylor Robie, Eric Han, Adnan Aziz
        +

Holding the line on performance is challenging for ML frameworks like PyTorch. Existing AI benchmarks like MLPerf are end-to-end and therefore require large volumes of data, at-scale GPU clusters, and long benchmarking times. We developed TorchBench, a novel AI benchmark suite characterized by minimal data inputs, single-GPU execution, and millisecond-per-test latencies. TorchBench is now deployed as part of the PyTorch nightly release process, guarding against performance/correctness regressions and testing experimental PyTorch features on SOTA machine learning models.

        + +

        + TOOLS +

        +
        +
        + +
        +
        + + + +
        + + Democratizing AI for Biology With OpenFold + +
        +
        Gustaf Ahdritz, Sachin Kadyan, Will Gerecke, Luna Xia, Nazim Bouatta, Mohammed AlQuraishi
        +

        OpenFold, developed by Columbia University, is an open-source protein structure prediction model implemented with PyTorch. The goal of OpenFold is to verify that AlphaFold 2 — DeepMind's protein structure prediction model — can be reproduced from scratch and beyond that, make components of the system available to like-minded researchers and academics so they can build on top of it. During this research, Weights & Biases was used to accelerate OpenFold’s reproduction of AlphaFold 2. The collaborative nature of W&B allowed for insights to scale from a single researcher to the entire team and helped solve the reproducibility challenge in ML.

        + +

        + TOOLS +

        +
        +
        +
        @@ -72,4 +906,170 @@
        {{ poster.authors | join: ", "}}
        $(this).toggle($(this).text().toLowerCase().indexOf(input) > -1); }); }); - \ No newline at end of file + + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/ptdd/2021.html b/ecosystem/ptdd/2021.html index e46be84cf225..a3857634866a 100644 --- a/ecosystem/ptdd/2021.html +++ b/ecosystem/ptdd/2021.html @@ -1,12 +1,118 @@ ---- -layout: default -title: Developer's Day 2021 -permalink: ecosystem/ptdd/2021 -background-class: ecosystem-join-background -body-class: ecosystem ---- - -
        + + + + + + + + + + + + + Developer's Day 2021 | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +

        PyTorch Developer Day

        2021

        @@ -40,43 +146,1189 @@

        2021


        Posters

        - {% for poster in site.data.ecosystem.ptdd['2021'].posters %} +
        - {% if poster.poster_link %} - - {% endif %} +
        - {% if poster.poster_link %} - {{ poster.title }} - {% else %} {{ poster.title }} {% endif %} + + xaitk-saliency: Saliency built for analytics and autonomy applications +
        -
        {{ poster.authors | join: ", "}}
        -

        {{ poster.description }}

        - {% if poster.link %} +
        Brian Hu, Paul Tunison, Elim Schenck, Roddy Collins, Anthony Hoogs
        +

        Despite significant progress in the past few years, machine learning-based systems are still often viewed as “black boxes,” which lack the ability to explain their output decisions to human users. Explainable artificial intelligence (XAI) attempts to help end-users understand and appropriately trust machine learning-based systems. One commonly used technique involves saliency maps, which are a form of visual explanation that reveals what an algorithm pays attention to during its decision process. We introduce the xaitk-saliency python package, an open-source, explainable AI framework and toolkit for visual saliency algorithm interfaces and implementations, built for analytics and autonomy applications. The framework is modular and easily extendable, with support for several image understanding tasks, including image classification, image similarity, and object detection. We have also recently added support for the autonomy domain, by creating saliency maps for pixel-based deep reinforcement-learning agents in environments such as ATARI. Several example notebooks are included that demo the current capabilities of the toolkit. xaitk-saliency will be of broad interest to anyone who wants to deploy AI capabilities in operational settings and needs to validate, characterize and trust AI performance across a wide range of real-world conditions and application areas using saliency maps. To learn more, please visit: https://github.com/XAITK/xaitk-saliency.

        +

        - {{ poster.link }} + https://github.com/XAITK/xaitk-saliency

        - {% endif %} +

        - {{ poster.categories }} + MEDICAL & HEALTHCARE, RESPONSIBLE AI

        - {% endfor %} -
        -
        -
        -
        - \ No newline at end of file + +
        +
        + + + +
        + + CovRNN—A collection of recurrent neural network models for predicting outcomes of COVID-19 patients using their EHR data + +
        +
        Laila Rasmy, Ziqian Xie, Bingyu Mao, Khush Patel, Wanheng Zhang, Degui Zhi
        +

CovRNN is a collection of recurrent neural network (RNN)-based models to predict COVID-19 patients' outcomes using their available electronic health record (EHR) data on admission, without the need for specific feature selection or missing-data imputation. CovRNN is designed to predict three outcomes: in-hospital mortality, need for mechanical ventilation, and long length of stay (LOS >7 days). Predictions are made for time-to-event risk scores (survival prediction) and all-time risk scores (binary prediction). Our models were trained and validated using heterogeneous and de-identified data of 247,960 COVID-19 patients from 87 healthcare systems, derived from the Cerner® Real-World Dataset (CRWD), and 36,140 de-identified patients' data derived from the Optum® de-identified COVID-19 Electronic Health Record v. 1015 dataset (2007 - 2020). CovRNN shows higher performance than traditional models. It achieved an area under the receiver operating characteristic curve (AUROC) of 93% for mortality and mechanical ventilation predictions on the CRWD test set (vs. 91.5% and 90% for the light gradient boosting machine (LGBM) and logistic regression (LR), respectively) and 86.5% for prediction of LOS > 7 days (vs. 81.7% and 80% for LGBM and LR, respectively). For survival prediction, CovRNN achieved a C-index of 86% for mortality and 92.6% for mechanical ventilation. External validation confirmed AUROCs in similar ranges. https://www.medrxiv.org/content/10.1101/2021.09.27.2126

        + +

        + https://github.com/ZhiGroup/CovRNN +

        + +

        + MEDICAL & HEALTHCARE, RESPONSIBLE AI +

        +
        +
        + +
        +
        + + + +
        + + Farabio - Deep learning for Biomedical Imaging + +
        +
        Sanzhar Askaruly, Nurbolat Aimakov, Alisher Iskakov, Hyewon Cho, Yujin Ahn, Myeong Hoon Choi, Hyunmo Yang, Woonggyu Jung
        +

Deep learning has transformed many aspects of industrial pipelines recently. Scientists involved in biomedical imaging research are also benefiting from the power of AI to tackle complex challenges. Although the academic community has widely accepted image processing tools such as scikit-image and ImageJ, there is still a need for a tool that integrates deep learning into biomedical image analysis. We propose a minimal but convenient Python package based on PyTorch with common deep learning models, extended by flexible trainers and medical datasets. In this work, we also share a theoretical deep dive in the form of a course, as well as minimal tutorials for running Android applications containing models trained with Farabio.

        + +

        + https://github.com/tuttelikz/farabio +

        + +

        + MEDICAL & HEALTHCARE, RESPONSIBLE AI +

        +
        +
        + +
        +
        + + + +
        + + TorchIO: Pre-processing & Augmentation of Medical Images for Deep Learning Applications + +
        +
        Fernando Pérez-García, Rachel Sparks, Sébastien Ourselin
        +

Processing of medical images such as MRI or CT presents different challenges compared to RGB images typically used in computer vision: a lack of labels for large datasets, high computational costs, and the need for metadata to describe the physical properties of voxels. Data augmentation is used to artificially increase the size of the training datasets. Training with image patches decreases the need for computational power. Spatial metadata needs to be carefully taken into account in order to ensure a correct alignment and orientation of volumes. We present TorchIO, an open-source Python library to enable efficient loading, preprocessing, augmentation and patch-based sampling of medical images for deep learning. TorchIO follows the style of PyTorch and integrates standard medical image processing libraries to efficiently process images during training of neural networks. TorchIO transforms can be easily composed, reproduced, traced and extended. We provide multiple generic preprocessing and augmentation operations as well as simulation of MRI-specific artifacts. TorchIO was developed to help researchers standardize medical image processing pipelines and allow them to focus on the deep learning experiments. It encourages good open-science practices, as it supports experiment reproducibility and is version-controlled so that the software can be cited precisely. Due to its modularity, the library is compatible with other frameworks for deep learning with medical images.
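As a small illustration of the composable transform style described above (not taken from the poster), a minimal sketch; the file names and transform parameters are placeholders:

import torchio as tio

# Hypothetical input files; TorchIO reads common medical formats such as NIfTI.
subject = tio.Subject(
    mri=tio.ScalarImage('t1.nii.gz'),
    seg=tio.LabelMap('seg.nii.gz'),
)

transform = tio.Compose([
    tio.ToCanonical(),             # reorient using the image's spatial metadata
    tio.RandomAffine(degrees=10),  # spatial augmentation
    tio.RandomMotion(),            # simulate an MRI-specific motion artifact
])
transformed = transform(subject)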

        + +

        + https://github.com/fepegar/torchio/ +

        + +

        + MEDICAL & HEALTHCARE, RESPONSIBLE AI +

        +
        +
        + +
        +
        + + + +
        + + MONAI: A Domain Specialized Library for Healthcare Imaging + +
        +
        Michael Zephyr, Prerna Dogra, Richard Brown, Wenqi Li, Eric Kerfoot
        +

        Healthcare image analysis for both radiology and pathology is increasingly being addressed with deep-learning-based solutions. These applications have specific requirements to support various imaging modalities like MR, CT, ultrasound, digital pathology, etc. It is a substantial effort for researchers in the field to develop custom functionalities to handle these requirements. Consequently, there has been duplication of effort, and as a result, researchers have incompatible tools, which makes it hard to collaborate. MONAI stands for Medical Open Network for AI. Its mission is to accelerate the development of healthcare imaging solutions by providing domain-specialized building blocks and a common foundation for the community to converge in a native PyTorch paradigm.
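To give a concrete feel for the domain-specialized building blocks mentioned above, a hedged sketch of a MONAI preprocessing chain; the file name is a placeholder and transform names can differ slightly between MONAI releases:

from monai.transforms import Compose, LoadImage, EnsureChannelFirst, ScaleIntensity

# Medical-imaging-aware preprocessing, expressed in the familiar PyTorch style.
preprocess = Compose([
    LoadImage(image_only=True),  # reads NIfTI, DICOM and similar formats
    EnsureChannelFirst(),
    ScaleIntensity(),
])
volume = preprocess("scan.nii.gz")  # placeholder path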

        + +

        + https://monai.io/ +

        + +

        + MEDICAL & HEALTHCARE, RESPONSIBLE AI +

        +
        +
        + +
        +
        + + + +
        + + A Framework for Bayesian Neural Networks + +
        +
        Sahar Karimi, Beliz Gokkaya, Audrey Flower, Ehsan Emamjomeh-Zadeh, Adly Templeton, Ilknur Kaynar Kabul, Erik Meijer
        +

We are presenting a framework for building Bayesian Neural Networks (BNNs). One of the critical use cases of BNNs is uncertainty quantification of ML predictions in deep learning models. Uncertainty quantification leads to more robust and reliable ML systems that are often employed to prevent catastrophic outcomes of overconfident predictions, especially in sensitive applications such as integrity, medical imaging and treatments, self-driving cars, etc. Our framework provides tools to build BNN models, estimate the uncertainty of their predictions, and transform existing models into their BNN counterparts. We discuss the building blocks and API of our framework along with a few examples and future directions.

        + +

        + MEDICAL & HEALTHCARE, RESPONSIBLE AI +

        +
        +
        + +
        +
        + + + +
        + + Revamp of torchvision datasets and transforms + +
        +
        Philip Meier, torchvision team, torchdata team
        +

torchvision provides many image and video datasets as well as transformations for research and prototyping. In fact, the very first release of torchvision in 2016 was all about these two submodules. Since their inception, their extent has grown organically, and they have become hard to maintain and sometimes also hard to use. Over the years we have gathered a lot of user feedback and decided to revamp the datasets and transforms. This poster showcases the current state of the rework and compares it to the, hopefully soon-to-be, legacy API.

        + +

        + https://pytorchvideo.org/ +

        + +

        + AUDIO, IMAGE & VIDEO, VISION +

        +
        +
        + +
        +
        + + + +
        + + OpenMMLab: Open-Source Toolboxes for Artificial Intelligence + +
        +
        Wenwei Zhang, Han Lyu, Kai Chen
        +

OpenMMLab builds open-source toolboxes for computer vision. It aims to 1) provide high-quality codebases to reduce the difficulty of algorithm reimplementation; 2) create efficient deployment toolchains targeting a variety of inference engines and devices; 3) build a solid foundation for the community to bridge the gap between academic research and industrial applications. Based on PyTorch, OpenMMLab develops MMCV to provide unified abstract interfaces and common utilities, which serve as the foundation of the whole system. Since the initial release in October 2018, OpenMMLab has released 15+ toolboxes covering different research areas, implemented 200+ algorithms, and released 1800+ pre-trained models. With tighter collaboration with the community, OpenMMLab will open source more toolboxes and full-stack toolchains in the future.

        + +

        + openmmlab.com +

        + +

        + AUDIO, IMAGE & VIDEO, VISION +

        +
        +
        + +
        +
        + + + +
        + + Flood Segmentation on Sentinel-1 SAR Imagery with Semi-Supervised Learning + +
        +
        Siddha Ganju, Sayak Paul
        +

Floods wreak havoc throughout the world, causing billions of dollars in damages and uprooting communities, ecosystems and economies. Aligning flood extent mapping with local topography can provide a plan of action that the disaster response team can consider. Thus, remote flood level estimation via satellites like Sentinel-1 can prove to be remedial. The Emerging Techniques in Computational Intelligence (ETCI) competition on Flood Detection tasked participants with predicting flooded pixels after training with synthetic aperture radar (SAR) images in a supervised setting. We use a cyclical approach involving three stages: (1) training an ensemble of multiple UNet architectures with the available high- and low-confidence labeled data and generating pseudo labels (low-confidence labels) on the entire unlabeled test dataset, (2) filtering out quality generated labels, and (3) combining the generated labels with the previously available high-confidence labeled dataset. This assimilated dataset is used for the next round of training ensemble models. This cyclical process is repeated until the performance improvement plateaus. Additionally, we post-process our results with Conditional Random Fields. Our approach sets the second-highest score on the public hold-out test leaderboard for the ETCI competition with 0.7654 IoU. To the best of our knowledge, this is one of the first works to try out semi-supervised learning to improve flood segmentation models.

        + +

        + https://github.com/sidgan/ETCI-2021-Competition-on-FLood-Detection +

        + +

        + AUDIO, IMAGE & VIDEO, VISION +

        +
        +
        + +
        +
        + + + +
        + + Real time Speech Enhancement + +
        +
        Xiaoyu Liu, James Wagner, Roy Fejgin, Joan Serra, Santiago Pascual, Cong Zhou, Jordi Pons, Vivek Kumar
        +

        Speech enhancement is a fundamental audio processing task that has experienced a radical change with the advent of deep learning technologies. We will overview the main characteristics of the task and the key principles of existing deep learning solutions. We will be presenting the past and present work done by our group with the overall goal of delivering the best possible intelligibility and sound quality. Finally, we will provide our view on the future of speech enhancement and show how our current long-term research aligns with such a view.

        + +

        + AUDIO, IMAGE & VIDEO, VISION +

        +
        +
        + +
        +
        + + + +
        + + Kornia AI: Low Level Computer Vision for AI + +
        +
        Edgar Riba, Dmytro Mishkin, Jian Shi, Luis Ferraz
        +

        Kornia is a differentiable library that allows classical computer vision to be integrated into deep learning models. It consists of a set of routines and differentiable modules to solve generic computer vision problems. At its core, the package uses PyTorch as its main backend both for efficiency and to take advantage of the reverse-mode auto-differentiation to define and compute the gradient of complex functions.
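A minimal sketch of the differentiable-operator idea described above; the image is random and the blur parameters are arbitrary:

import torch
import kornia

img = torch.rand(1, 3, 64, 64, requires_grad=True)  # B, C, H, W
blurred = kornia.filters.gaussian_blur2d(img, kernel_size=(5, 5), sigma=(1.5, 1.5))
loss = blurred.mean()
loss.backward()  # gradients flow through the classical vision operator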

        + +

        + https://kornia.github.io// +

        + +

        + AUDIO, IMAGE & VIDEO, VISION +

        +
        +
        + +
        +
        + + + +
        + + Video Transformer Network + +
        +
        Daniel Neimark, Omri Bar, Maya Zohar, Dotan Asselmann
        +

        This paper presents VTN, a transformer-based framework for video recognition. Inspired by recent developments in vision transformers, we ditch the standard approach in video action recognition that relies on 3D ConvNets and introduce a method that classifies actions by attending to the entire video sequence information. Our approach is generic and builds on top of any given 2D spatial network. In terms of wall runtime, it trains 16.1× faster and runs 5.1× faster during inference while maintaining competitive accuracy compared to other state-of-the-art methods. It enables whole video analysis, via a single end-to-end pass, while requiring 1.5× fewer GFLOPs. We report competitive results on Kinetics-400 and present an ablation study of VTN properties and the trade-off between accuracy and inference speed. We hope our approach will serve as a new baseline and start a fresh line of research in the video recognition domain. Code and models are available at: https://github.com/bomri/SlowFast/blob/master/projects/vtn/README.md . See paper: https://arxiv.org/abs/2102.00719

        + +

        + https://github.com/bomri/SlowFast/blob/master/projects/vtn/README.md +

        + +

        + AUDIO, IMAGE & VIDEO, VISION +

        +
        +
        + +
        +
        + + + +
        + + DLRT: Ultra Low-Bit Precision Inference Engine for PyTorch on CPU + +
        +
Dr. Ehsan Saboori, Dr. Sudhakar Sah, MohammadHossein AskariHemmat, Saad Ashfaq, Alex Hoffman, Olivier Mastropietro, Davis Sawyer
        +

        The emergence of Deep Neural Networks (DNNs) on embedded and low-end devices holds tremendous potential to expand the adoption of AI technologies to wider audiences. However, making DNNs applicable for inference on such devices using techniques such as quantization and model compression, while maintaining model accuracy, remains a challenge for production deployment. Furthermore, there is a lack of inference engines available in any AI framework to run such low precision networks. Our work presents a novel inference engine and model compression framework that automatically enables PyTorch developers to quantize and run their deep learning models at 2bit and 1bit precision, making them faster, smaller and more energy-efficient in production. DLRT empowers PyTorch developers to unlock advanced AI on low-power CPUs, starting with ARM CPUs and MCUs. This work allows AI researchers and practitioners to achieve 10x faster inference and near-GPU level performance on a fraction of the power and cost.

        + +

        + https://github.com/deeplite +

        + +

        + PERFORMANCE, PRODUCTION & DEPLOYMENT +

        +
        +
        + +
        +
        + + + +
        + + Serving PyTorch Models in Production at Walmart Search + +
        +
        Adway Dhillo, Nidhin Pattaniyil
        +

This poster is for data scientists or ML engineers looking to productionalize their PyTorch models. It covers the post-training steps that should be taken to optimize the model, such as quantization and TorchScript, and walks the user through packaging and serving the model with Facebook's TorchServe. It also covers the benefits of script mode and PyTorch JIT. Benefits of TorchServe include high-performance serving, multi-model serving, model versioning for A/B testing, server-side batching, and support for pre- and post-processing.
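As a rough sketch of the post-training steps mentioned above (dynamic quantization followed by TorchScript), using a toy model in place of the actual search model:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10)).eval()

# Post-training dynamic quantization of the linear layers to int8.
quantized = torch.quantization.quantize_dynamic(model, {nn.Linear}, dtype=torch.qint8)

# Script mode (TorchScript) for serving, e.g. behind TorchServe.
scripted = torch.jit.script(quantized)
scripted.save("model_optimized.pt")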

        + +

        + https://pytorch.org/serve/ +

        + +

        + PERFORMANCE, PRODUCTION & DEPLOYMENT +

        +
        +
        + +
        +
        + + + +
        + + CleanRL: high-quality single file implementation of Deep Reinforcement Learning algorithms with research-friendly features + +
        +
        Shengyi Huang, Rousslan Fernand Julien Dossa, Chang Ye, Jeff Braga
        +

CleanRL is an open-source library that provides high-quality single-file implementations of Deep Reinforcement Learning algorithms. It provides a simpler yet scalable developing experience by having a straightforward codebase and integrating production tools to help interact and scale experiments. In CleanRL, we put all details of an algorithm into a single file, making these performance-relevant details easier to recognize. Additionally, an experiment tracking feature is available to help log metrics, hyperparameters, videos of an agent's gameplay, dependencies, and more to the cloud. Despite succinct implementations, we have also designed tools to help scale, at one point orchestrating experiments on more than 2000 machines simultaneously via Docker and cloud providers. The source code can be found at https://github.com/vwxyzjn/cleanrl.

        + +

        + https://github.com/vwxyzjn/cleanrl/ +

        + +

        + PERFORMANCE, PRODUCTION & DEPLOYMENT +

        +
        +
        + +
        +
        + + + +
        + + Deploying a Food Classifier on PyTorch Mobile + +
        +
        Nidhin Pattaniyil, Reshama Shaikh
        +

As technology improves, so does the practice of training deep learning models. Additionally, since the time spent on mobile devices is greater than on desktop, the demand for applications running natively on mobile devices is also high. This demo goes through a complete example of training a deep learning vision classifier on the Food-101 dataset using PyTorch. We then deploy it on web and mobile using TorchServe and PyTorch Mobile.

        + +

        + https://github.com/npatta01/pytorch-food +

        + +

        + PERFORMANCE, PRODUCTION & DEPLOYMENT +

        +
        +
        + +
        +
        + + + +
        + + Torch-TensorRT: Accelerating Inference Performance Directly from PyTorch using TensorRT + +
        +
        Naren Dasan, Nick Comly, Dheeraj Peri, Anurag Dixit, Abhiram Iyer, Bo Wang, Arvind Sridhar, Boris Fomitchev, Josh Park
        +

Learn how to accelerate PyTorch inference, directly from the framework, for model deployment. The PyTorch integration for TensorRT makes the performance of TensorRT's GPU optimizations available in PyTorch for any model. We will walk you through how, with 3 lines of code, you can go from a trained model to optimized TensorRT-embedded TorchScript, ready to deploy to a production environment.
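The "3 lines of code" claim roughly corresponds to a compile call like the sketch below; the model, input shape and precision are illustrative, and the exact API may vary between Torch-TensorRT releases:

import torch
import torch_tensorrt
import torchvision

model = torchvision.models.resnet50(pretrained=True).eval().cuda()

trt_model = torch_tensorrt.compile(
    model,
    inputs=[torch_tensorrt.Input((1, 3, 224, 224))],
    enabled_precisions={torch.half},  # allow FP16 TensorRT kernels
)
out = trt_model(torch.randn(1, 3, 224, 224).cuda())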

        + +

        + https://github.com/NVIDIA/Torch-TensorRT/ +

        + +

        + PERFORMANCE, PRODUCTION & DEPLOYMENT +

        +
        +
        + +
        +
        + + + +
        + + Tensorized Deep Learning with TensorLy-Torch + +
        +
        Jean Kossaifi
        +

        Most of the data in modern machine learning (e.g. fMRI, videos, etc) is inherently multi-dimensional and leveraging that structure is crucial for good performance. Tensor methods are the natural way to achieve this and can improve deep learning and enable i) large compression ratios through a reduction of the number of parameters, ii) computational speedups, iii) improved performance and iv) better robustness. The TensorLy project provides the tools to manipulate tensors, including tensor algebra, regression and decomposition. TensorLy-Torch builds on top of this and enables tensor-based deep learning by providing out-of-the-box tensor based PyTorch layers that can be readily combined with any deep neural network architecture and takes care of things such as initialization and tensor dropout.
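As a small illustration of the tensor methods mentioned above, a hedged sketch of a CP (PARAFAC) decomposition with TensorLy's PyTorch backend; the tensor shape and rank are arbitrary, and the reconstruction helper is named kruskal_to_tensor in older TensorLy releases:

import torch
import tensorly as tl
from tensorly.decomposition import parafac

tl.set_backend('pytorch')            # run TensorLy's tensor algebra on torch tensors
x = torch.randn(8, 16, 32)
cp = parafac(tl.tensor(x), rank=4)   # factorize into a rank-4 CP/PARAFAC form
x_approx = tl.cp_to_tensor(cp)       # reconstruct to inspect the approximation error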

        + +

        + http://tensorly.org/quantum +

        + +

        + PERFORMANCE, PRODUCTION & DEPLOYMENT +

        +
        +
        + +
        +
        + + + +
        + + Catalyst-Accelerated Deep Learning R&D + +
        +
        Sergey Kolesnikov
        +

        Catalyst is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop.
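A minimal sketch of the "no train loop" style Catalyst promotes, on synthetic data; the argument names follow the library's documented SupervisedRunner API but should be treated as an approximation:

import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl

X, y = torch.randn(256, 32), torch.randint(0, 10, (256,))
loaders = {
    "train": DataLoader(TensorDataset(X, y), batch_size=32),
    "valid": DataLoader(TensorDataset(X, y), batch_size=32),
}

model = torch.nn.Linear(32, 10)
runner = dl.SupervisedRunner()
runner.train(
    model=model,
    criterion=torch.nn.CrossEntropyLoss(),
    optimizer=torch.optim.Adam(model.parameters()),
    loaders=loaders,
    num_epochs=1,
)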

        + +

        + https://catalyst-team.com/ +

        + +

        + EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING +

        +
        +
        + +
        +
        + + + +
        + + Ray Lightning: Easy Multi-node PyTorch Lightning training + +
        +
        Amog Kamsetty, Richard Liaw, Will Drevo, Michael Galarnyk
        +

PyTorch Lightning is a library that provides a high-level interface for PyTorch which helps you organize your code and reduce boilerplate. By abstracting away engineering code, it makes deep learning experiments easier to reproduce and improves developer productivity. PyTorch Lightning also includes plugins to easily parallelize your training across multiple GPUs. This parallel training, however, depends on a critical assumption: that you already have your GPU(s) set up and networked together in an efficient way for training. While you may have a managed cluster like SLURM for multi-node training on the cloud, setting up the cluster and its configuration is no easy task. Ray Lightning was created with this problem in mind to make it easy to leverage multi-node training without needing extensive infrastructure expertise. It is a simple and free plugin for PyTorch Lightning with a number of benefits like simple setup, easy scale-up, seamless creation of multi-node clusters on AWS/Azure/GCP via the Ray Cluster Launcher, and an integration with Ray Tune for large-scale distributed hyperparameter search and state-of-the-art algorithms.
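A hedged sketch of how the plugin is wired into a Lightning Trainer. The class name RayPlugin and its arguments follow the project's early README and may have been renamed in later releases, and MyLightningModule is a placeholder for a user-defined LightningModule:

import pytorch_lightning as pl
from ray_lightning import RayPlugin  # assumed import path; see the project README

# One Ray worker per GPU; Ray handles placement across the cluster.
plugin = RayPlugin(num_workers=4, use_gpu=True)
trainer = pl.Trainer(max_epochs=3, plugins=[plugin])
trainer.fit(MyLightningModule())     # placeholder LightningModule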

        + +

        + https://github.com/ray-project/ray_lightning +

        + +

        + EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING +

        +
        +
        + +
        +
        + + + +
        + + Supercharge your Federated Learning with Synergos + +
        +
        Jin Howe Teo, Way Yen Chen, Najib Ninaba, Choo Heng Chong Mark
        +

Data sits at the centerpiece of any machine learning endeavour, yet in many real-world projects, a single party's data is often insufficient and needs to be augmented with data from other sources. This is unfortunately easier said than done, as there are many innate concerns (be they regulatory, ethical, commercial, etc.) stopping parties from exchanging data. Fortunately, there exists an emerging privacy-preserving machine learning technology called Federated Learning. It enables multiple parties holding local data to collaboratively train machine learning models without actually exchanging their data with one another, hence preserving the confidentiality of different parties' local data. Today, we will be showcasing Synergos, a distributed platform built here at AI Singapore to facilitate the adoption of Federated Learning. Specifically, it strives to make the complex mechanisms involved in any federated endeavour simple, accessible and sustainable.

        + +

        + https://github.com/aimakerspace/synergos_simulator +

        + +

        + EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING +

        +
        +
        + +
        +
        + + + +
        + + AdaptDL: An Open-Source Resource-Adaptive Deep Learning Training and Scheduling Framework + +
        +
        Aurick Qiao, Omkar Pangarkar, Richard Fan
        +

        AdaptDL is an open source framework and scheduling algorithm that directly optimizes cluster-wide training performance and resource utilization. By elastically re-scaling jobs, co-adapting batch sizes and learning rates, and avoiding network interference, AdaptDL improves shared-cluster training compared with alternative schedulers. AdaptDL can automatically determine the optimal number of resources given a job’s need. It will efficiently add or remove resources dynamically to ensure the highest-level performance. The AdaptDL scheduler will automatically figure out the most efficient number of GPUs to allocate to your job, based on its scalability. When the cluster load is low, your job can dynamically expand to take advantage of more GPUs. AdaptDL offers an easy-to-use API to make existing PyTorch training code elastic with adaptive batch sizes and learning rates. We have also ported AdaptDL to Ray/Tune which can automatically scale trials of an Experiment and can be used to schedule stand-alone PyTorch training jobs on the cloud in a cost-effective way.

        + +

        + https://github.com/petuum/adaptdl +

        + +

        + EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING +

        +
        +
        + +
        +
        + + + +
        + + Define-by-run quantization + +
        +
        Vasiliy Kuznetsov, James Reed, Jerry Zhang
        +

Describes a prototype PyTorch workflow to perform quantization syntax transforms in a define-by-run fashion, with: * no model changes needed (compared to Eager mode, which requires manual quant/dequant insertion and fusion) * almost no model syntax restrictions (compared to FX graph mode, which requires symbolic traceability)

        + +

        + https://pytorch.org/docs/stable/quantization.html +

        + +

        + EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING +

        +
        +
        + +
        +
        + + + +
        + + Fx Numeric Suite Core APIs + +
        +
        Charles Hernandez, Vasiliy Kuznetzov, Haixin Liu
        +

A quantized model can go wrong when it doesn't satisfy the accuracy we expect. Debugging the accuracy issue of quantization is not easy and is time consuming. The FX Numeric Suite Core APIs allow users to better diagnose the source of their quantization error for both statically and dynamically quantized models. This poster gives an overview of the core APIs and techniques available to users through the FX Numeric Suite, and how they can use them to improve quantization performance.

        + +

        + EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING +

        +
        +
        + +
        +
        + + + +
        + + snnTorch: Training spiking neural networks using gradient-based optimization + +
        +
        J.K. Eshraghian, M. Ward, E.O. Neftci, G. Lenz, X. Wang, G. Dwivedi, M. Bennamoun, D.S. Jeong, W.D. Lu
        +

        The brain is the perfect place to look for inspiration to develop more efficient neural networks. One of the main differences with modern deep learning is that the brain encodes and processes information as spikes rather than continuous activations. Combining the training methods intended for neural networks with the sparse, spiking activity inspired by biological neurons has shown the potential to improve the power efficiency of training and inference by several orders of magnitude. snnTorch is a Python package for performing gradient-based learning with spiking neural networks. It extends the capabilities of PyTorch, taking advantage of its GPU accelerated tensor computation and applying it to networks of event-driven spiking neurons. snnTorch is designed to be intuitively used with PyTorch, as though each spiking neuron were simply another activation in a sequence of layers. It is therefore agnostic to fully-connected layers, convolutional layers, residual connections, etc. The classical challenges that have faced the neuromorphic engineering community, such as the non-differentiability of spikes, the dead neuron problem, vanishing gradients in backpropagation-through-time, are effectively solved in snnTorch and enable the user to focus on building applications that leverage sparsity and event-driven data streams.
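A small sketch of the "spiking neuron as just another layer" idea described above; the layer sizes and decay rate beta are arbitrary:

import torch
import torch.nn as nn
import snntorch as snn

fc = nn.Linear(784, 100)
lif = snn.Leaky(beta=0.9)   # leaky integrate-and-fire neuron layer

mem = lif.init_leaky()      # initialize the membrane potential state
x = torch.rand(8, 784)      # one time step of input
spk, mem = lif(fc(x), mem)  # emits spikes and carries the membrane state forward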

        + +

        + https://snntorch.readthedocs.io/en/latest/ +

        + +

        + EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING +

        +
        +
        + +
        +
        + + + +
        + + PyTorch for R + +
        +
        Daniel Falbel
        +

Last year, the PyTorch for R project was released, allowing R users to benefit from PyTorch's speed and flexibility. Since then we have had a growing community of contributors who are improving the torch for R interface, building research and products on top of it, and using it to teach deep learning methods. In this poster we showcase the past and current developments in the PyTorch for R project, as well as our plans for the future.

        + +

        + https://torch.mlverse.org/ +

        + +

        + EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING +

        +
        +
        + +
        +
        + + + +
        + + ocaml-torch and tch-rs: writing and using PyTorch models using OCaml or Rust + +
        +
        Laurent Mazare
        +

The main front-end for using PyTorch is its Python API, however LibTorch provides a lower-level C++ API to manipulate tensors, perform automatic differentiation, etc. ocaml-torch and tch-rs are two open-source projects providing wrappers for this C++ API in OCaml and Rust, respectively. Users can then write OCaml and Rust code to create new models, perform inference and training, and benefit from the guarantees provided by strongly typed programming languages and functional programming. They can also use TorchScript to leverage existing Python models. The libraries provide various examples, ranging from the main computer vision models to a minimalist GPT implementation. The main challenges for these bindings are to provide idiomatic APIs adapted to each language's specificities; to automatically generate most of the bindings code, as there are thousands of C++ functions to expose; and to interact properly with the memory model of each language.

        + +

        + https://github.com/laurentMazare/ocaml-torch +

        + +

        + EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING +

        +
        +
        + +
        +
        + + + +
        + + PyTorch Lightning Flash - Your PyTorch AI Factory + +
        +
        Ari Bornstein
        +

Flash is a high-level deep learning framework for fast prototyping, baselining, finetuning and solving deep learning problems. It features a set of tasks for you to use for inference and finetuning out of the box, and an easy-to-implement API to customize every step of the process for full flexibility. Flash is built for beginners with a simple API that requires very little deep learning background, and for data scientists, Kagglers, applied ML practitioners and deep learning researchers who want a quick way to get a deep learning baseline with the advanced features PyTorch Lightning offers. Flash enables you to easily configure and run complex AI recipes for over 15 tasks across 7 data domains.

        + +

        + https://github.com/PyTorchLightning +

        + +

        + EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING +

        +
        +
        + +
        +
        + + + +
        + + PyTorch-Ignite: Training and evaluating neural networks flexibly and transparently + +
        +
        Victor Fomin, Taras Savchyn, Priyansi
        +

        PyTorch-Ignite is a high-level library to help with training and evaluating neural networks in PyTorch flexibly and transparently. PyTorch-Ignite is designed to be at the crossroads of high-level Plug & Play features and under-the-hood expansion possibilities. The tool aims to improve the deep learning community's technical skills by promoting best practices where things are not hidden behind a divine tool that does everything, but remain within the reach of users. PyTorch-Ignite differs from other similar tools by allowing users to compose their applications without being focused on a super multi-purpose object, but rather on weakly coupled components allowing advanced customization.
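A minimal sketch of the engine-based composition described above, on synthetic data:

import torch
from torch.utils.data import DataLoader, TensorDataset
from ignite.engine import create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy

X, y = torch.randn(256, 32), torch.randint(0, 2, (256,))
loader = DataLoader(TensorDataset(X, y), batch_size=32)

model = torch.nn.Linear(32, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
criterion = torch.nn.CrossEntropyLoss()

trainer = create_supervised_trainer(model, optimizer, criterion)
evaluator = create_supervised_evaluator(model, metrics={"acc": Accuracy()})

trainer.run(loader, max_epochs=2)
evaluator.run(loader)
print(evaluator.state.metrics["acc"])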

        + +

        + https://pytorch-ignite.ai/ecosystem/ +

        + +

        + EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING +

        +
        +
        + +
        +
        + + + +
        + + Benchmarking the Accuracy and Robustness of Feedback Alignment Methods + +
        +
        Albert Jimenez, Mohamed Akrout
        +

        Backpropagation is the default algorithm for training deep neural networks due to its simplicity, efficiency and high convergence rate. However, its requirements make it impossible to be implemented in a human brain. In recent years, more biologically plausible learning methods have been proposed. Some of these methods can match backpropagation accuracy, and simultaneously provide other extra benefits such as faster training on specialized hardware (e.g., ASICs) or higher robustness against adversarial attacks. While the interest in the field is growing, there is a necessity for open-source libraries and toolkits to foster research and benchmark algorithms. In this poster, we present BioTorch, a software framework to create, train, and benchmark biologically motivated neural networks. In addition, we investigate the performance of several feedback alignment methods proposed in the literature, thereby unveiling the importance of the forward and backward weight initialization and optimizer choice. Finally, we provide a novel robustness study of these methods against state-of-the-art white and black-box adversarial attacks.

        + +

        + https://github.com/jsalbert/biotorch +

        + +

        + EXTENDING PYTORCH, APIs, PARALLEL & DISTRIBUTED TRAINING +

        +
        +
        + +
        +
        + + + +
        + + Salina: Easy programming of Sequential Decision Learning and Reinforcement Learning Models in pytorch + +
        +
        Ludovic Denoyer, Alfredo de la Fuente, Song Duong, Jean-Baptiste Gaya, Pierre-Alexandre Kamienny, Daniel H. Thompson
        +

salina is a lightweight library extending PyTorch modules for the development of sequential decision models. It can be used for Reinforcement Learning (including model-based RL with differentiable environments, multi-agent RL, etc.), but also in supervised/unsupervised learning settings (for instance for NLP, computer vision, etc.).

        + +

        + https://github.com/facebookresearch/salina +

        + +

        + ML Ops, MODELS, MODEL OPTIMIZATION & INTERPRETABILITY +

        +
        +
        + +
        +
        + + + +
        + + Structured and Unstructured Pruning Workflow in PyTorch + +
        +
        Zafar Takhirov, Karen Zhou, Raghuraman Krishnamoorthi
        +

Two new toolflows for model pruning are introduced: Sparsifier and Pruner, which enable unstructured and structured pruning of the model weights, respectively. The toolflows can be combined with other optimization techniques, such as quantization, to achieve even higher levels of model compression. In addition, the Pruner toolflow can also be used for "shape propagation", where the physical structure of the model is modified after structured pruning (in FX graph mode only). This poster gives a high-level overview of the prototype API and usage examples, lists the currently supported sparse quantized kernels, and provides a brief overview of future plans.

        + +

        + https://github.com/pytorch/pytorch +

        + +

        + ML Ops, MODELS, MODEL OPTIMIZATION & INTERPRETABILITY +

        +
        +
        + +
        +
        + + + +
        + + Torch-CAM: class activation explorer + +
        +
        François-Guillaume Fernandez
        +

One of the core inconveniences of deep learning comes from its interpretability, which remains obscure for most non-trivial convolutional models. Their very performance is granted by optimization processes that have high degrees of freedom and no constraints on explainability. Fortunately, modern frameworks' mechanisms grant access to the information flow in their components, which has paved the way to building intuition around result interpretability in CNN models. The main contributions of the author are as follows: building a flexible framework for class activation computation; providing high-quality implementations of the most popular methods; and making these methods usable by entry-level users as well as researchers. The open-source project is available here: https://github.com/frgfm/torch-cam

        + +

        + https://github.com/frgfm/torch-cam +

        + +

        + ML Ops, MODELS, MODEL OPTIMIZATION & INTERPRETABILITY +

        +
        +
        + +
        +
        + + + +
        + + moai: A Model Development Kit to Accelerate Data-driven Workflows + +
        +
        Nikolaos Zioulis
        +

moai is a PyTorch-based AI Model Development Kit (MDK) that seeks to improve data-driven model workflows, design and understanding. It relies on hydra for handling configuration and lightning for handling infrastructure. As a kit, it offers a set of actions to `train` or `evaluate` models using the corresponding actions, which consume configuration files. Apart from the definition of the model, data, training scheme, optimizer, visualization and logging, these configuration files additionally use named tensors to define tensor processing graphs. These are created by chaining various building blocks called monads, which are functional units or otherwise single-responsibility modules. Monad parameters and input/output tensors are defined in the configuration file, allowing the entire model to be summarized into a single file. This opens up novel functionalities like querying for inter-model differences using the `diff` action, or aggregating the results of multiple models using the `plot` action, which uses hiplot to compare models in various ways. moai facilitates high-quality reproduction (using the `reprod` action), as apart from automatically handling all related boilerplate, it standardizes the process of developing modules/monads and implicitly logs all hyperparameters. Even though no code is required, moai exploits Python's flexibility to allow developers to integrate their own code into its engine from external projects, vastly increasing their productivity.

        + +

        + https://github.com/ai-in-motion/moai +

        + +

        + ML Ops, MODELS, MODEL OPTIMIZATION & INTERPRETABILITY +

        +
        +
        + +
        +
        + + + +
        + + Building Production ML Pipelines for PyTorch Models + +
        +
        Vaibhav Singh, Rajesh Thallam, Jordan Totten, Karl Weinmeister
        +

        Machine Learning Operationalization has rapidly evolved in the last few years with a growing set of tools for each phase of development. From experimentation to automated model analysis and deployment, each of these tools offer some unique capabilities. In this work we survey a slice of these tools and demonstrate an opinionated example of an end to end CI/CD pipeline for PyTorch model development and deployment using Vertex AI SDK. The goal of this session is to aid an informed conversation on the choices available to PyTorch industry practitioners who are looking to operationalize their ML models, and to researchers who are simply trying to organize their experiments. Although our implementation example will make tool choices at various stages, we will be focused on ML design patterns that are applicable to a wide variety of commercial and open-source offerings.

        + +

        + https://github.com/GoogleCloudPlatform/vertex-ai-samples +

        + +

        + ML Ops, MODELS, MODEL OPTIMIZATION & INTERPRETABILITY +

        +
        +
        + +
        +
        + + + +
        + + Customizing MLOps pipelines with JSON-AI: a declarative syntax to streamline ML in the database + +
        +
        George Hosu, Particio Cerda-Mardini, Natasha Seelam, Jorge Torres
        +

Nearly 64% of companies take over a month to a year to deploy a single machine learning (ML) model into production [1]. Many of these companies cite key challenges integrating with complex ML frameworks as a root cause [1], as there is still a gap between where data lives, how models are trained, and how downstream applications access predictions from models [1, 2]. MindsDB is a PyTorch-based ML platform that aims to solve fundamental MLOps challenges by abstracting ML models as "virtual tables", allowing models to be queried in the same natural way users work with data in databases. As data is diverse and varied, we recently developed an open-source declarative syntax, named "JSON-AI", to allow others to customize ML model internals without changing source code. We believe that the key elements of the data science (DS)/ML pipeline, namely data pre-processing/cleaning, feature engineering, and model-building [2], should be automated in a robust, reliable, and reproducible manner with simplicity. JSON-AI gives you refined control of each of these steps, and enables users to bring custom routines into their ML pipeline. In our poster, we will show how a user interfaces with JSON-AI to bring original approaches to each of the aforementioned parts of the DS/ML pipeline, along with control over analysis and explainability tools. [1] Algorithmia (2021). 2021 state of enterprise machine learning [2] "How Much Automation Does a Data Scientist Want?" ArXiv (2021)

        + +

        + https://github.com/mindsdb/mindsdb/ +

        + +

        + ML Ops, MODELS, MODEL OPTIMIZATION & INTERPRETABILITY +

        +
        +
        + +
        +
        + + + +
        + + TorchStudio, a full featured IDE for PyTorch + +
        +
        Robin Lobel
        +

TorchStudio is an open-source, full-featured IDE for PyTorch. It aims to simplify the creation, training and iteration of AI models. It can load, analyze and explore datasets from the TorchVision or TorchAudio categories, or custom datasets with any format and number of inputs and outputs. TorchVision, TorchAudio or custom models can then be loaded or written from scratch, debugged, visualized as a graph, and trained using local hardware, a distant server or GPUs in the cloud. Trainings can then be compared in the dashboard with several analysis tools to help you identify the best performing set of models and hyperparameters and export it as TorchScript or ONNX files. TorchStudio is also highly customizable, with 90% of its functionalities accessible as open-source scripts and independent modules, to fit as many AI scenarios as possible.

        + +

        + https://torchstudio.ai/ +

        + +

        + ACCELERATORS, TOOLS, LIBRARY, DATA +

        +
        +
        + +
        +
        + + + +
        + + Accelerate TorchServe with Intel Extension for PyTorch + +
        +
        Mark Saroufim, Hamid Shojanazeri, Patrick Hu, Geeta Chauhan, Jing Xu, Jianan Gu, Jiong Gong, Ashok Emani, Eikan Wang, Min Jean Cho, Fan Zhao
        +

Accelerate TorchServe with Intel® Extension for PyTorch*: Intel is collaborating with Meta to bring the performance boost of Intel® Extension for PyTorch* to TorchServe, so that users can easily deploy their PyTorch models with satisfying out-of-the-box performance. With these software advancements, we demonstrate the ease of use of the IPEX user-facing API, and we also showcase the speed-up of Intel® Extension for PyTorch* FP32 and INT8 inference over stock PyTorch.
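A hedged sketch of the user-facing optimization call referenced above; ipex.optimize is the documented entry point for inference, though available flags vary by release:

import torch
import torchvision
import intel_extension_for_pytorch as ipex

model = torchvision.models.resnet50(pretrained=True).eval()
model = ipex.optimize(model)  # apply IPEX FP32 inference optimizations

with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))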

        + +

        + www.intel.com/Performanceindex +

        + +

        + ACCELERATORS, TOOLS, LIBRARY, DATA +

        +
        +
        + +
        +
        + + + +
        + + Kaolin Library + +
        +
        Clement Fuji Tsang, Jean-Francois Lafleche, Charles Loop, Masha Shugrina, Towaki Takikawa, Jiehan Wang
        +

        NVIDIA Kaolin is a suite of tools for accelerating 3D Deep Learning research. The Kaolin library provides a PyTorch API for working with a variety of 3D representations and includes a growing collection of GPU-optimized operations such as modular differentiable rendering, fast conversions between representations, loss functions, data loading, 3D checkpoints and more. The library also contains a lightweight 3D visualizer Dash3D and can work with an Omniverse companion app for dataset/checkpoint visualization and synthetic data generation.

        + +

        + ACCELERATORS, TOOLS, LIBRARY, DATA +

        +
        +
        + +
        +
        + + + +
        + + Accelerate PyTorch training with Cloud TPUs + +
        +
        Jack Cao, Milad Mohammadi, Zak Stone, Vaibhav Singh, Calvin Pelletier, Shauheen Zahirazami
        +

        PyTorch / XLA offers PyTorch users the ability to train their models on XLA devices including Cloud TPUs. This compiled path often makes it possible to utilize creative optimizations and achieve top performance on target XLA devices. With the introduction of Cloud TPU VMs, users have direct access to TPU host machines and therefore a great level of flexibility. In addition, TPU VMs make debugging easier and reduce data transfer overheads. Google has also recently announced the availability of Cloud TPU v4 Pods, which are exaflop-scale supercomputers for machine learning. Cloud TPU v4 Pods offer a whole new level of performance for large-scale PyTorch / XLA training of ML models.
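A minimal sketch of targeting an XLA device from ordinary PyTorch code; it assumes torch_xla is installed and a TPU (or other XLA device) is available:

import torch
import torch_xla.core.xla_model as xm

device = xm.xla_device()                     # a Cloud TPU core, when available
model = torch.nn.Linear(128, 10).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

x = torch.randn(32, 128, device=device)
loss = model(x).sum()
loss.backward()
optimizer.step()
xm.mark_step()                               # materialize the lazily built XLA graph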

        + +

        + ACCELERATORS, TOOLS, LIBRARY, DATA +

        +
        +
        + +
        +
        + + + +
        + + Accelerating PyTorch on the largest chip ever built (WSE) + +
        +
        Antonio Kim, Behzad Abghari, Chris Oliver, Cynthia Liu, Mark Browning, Vishal Subbiah, Kamran Jafari, Emad Barsoum, Jessica Liu, Sean Lie
        +

The Cerebras Wafer Scale Engine (WSE) is the largest processor ever built, dedicated to accelerating deep learning models for training and inference. A single chip in a single CS-2 system provides the compute power of a cluster of GPUs but acts as a single processor, making it also much simpler to use. We present the current PyTorch backend architecture for the Cerebras CS-2 and how we go all the way from PyTorch to laying out the model graph on the wafer. Additionally, we discuss the advantages of training on Cerebras hardware and its unique capabilities.

        + +

        + https://cerebras.net +

        + +

        + ACCELERATORS, TOOLS, LIBRARY, DATA +

        +
        +
        + +
diff --git a/ecosystem/pted/2021.html b/ecosystem/pted/2021.html
index 95453eba07eb..538fd37186d6 100644
--- a/ecosystem/pted/2021.html
+++ b/ecosystem/pted/2021.html
@@ -1,12 +1,118 @@
----
-layout: default
-title: Ecosystem Day 2021
-permalink: ecosystem/pted/2021
-background-class: features-background
-body-class: ecosystem
----
+ Ecosystem Day 2021 | PyTorch

        PyTorch Ecosystem Day

        2021

        @@ -55,43 +161,2068 @@

        2021


        Posters

        - {% for poster in site.data.ecosystem.pted['2021'].posters %} +
        - {% if poster.poster_link %} - - {% endif %} +
        - {% if poster.poster_link %} - {{ poster.title }} - {% else %} {{ poster.title }} {% endif %} + + Bring quantum machine learning to PyTorch with PennyLane +
        -
        {{ poster.authors | join: ", "}}
        -

        {{ poster.description }}

        - {% if poster.link %} +
        Josh Izaac, Thomas Bromley
        +

PennyLane allows you to train quantum circuits just like neural networks! This poster showcases how PennyLane can be interfaced with PyTorch to enable training of quantum and hybrid machine learning models. The outputs of a quantum circuit are provided as a Torch tensor with a defined gradient. We highlight how this functionality can be used to explore new paradigms in machine learning, including the use of hybrid models for transfer learning.
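A small sketch of the Torch interface described above; the circuit and parameter values are illustrative:

import torch
import pennylane as qml

dev = qml.device("default.qubit", wires=2)

@qml.qnode(dev, interface="torch")
def circuit(params):
    qml.RX(params[0], wires=0)
    qml.RY(params[1], wires=1)
    qml.CNOT(wires=[0, 1])
    return qml.expval(qml.PauliZ(1))

params = torch.tensor([0.1, 0.2], requires_grad=True)
loss = circuit(params)  # returned as a torch tensor
loss.backward()         # gradients flow back into the circuit parameters
print(params.grad)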

        +

        - {{ poster.link }} + http://pennylane.ai

        - {% endif %} +

        - {{ poster.categories }} + Platform, Ops & Tools

        - {% endfor %} -
        -
        -
        -
Thus, pre-trained models are often used as-is when a researcher wants to experiment only with a specific facet of a problem. See, as examples, FastAI's work into optimizers, schedulers, and gradual training through pre-trained residual models, or NLP projects with Hugging Face models as their backbone.

We think that, for many of these problems, we can automatically generate a "good enough" model and data-processing pipeline from just the raw data and the endpoint. To address this situation, we are developing MindsDB, an open-source, PyTorch-based ML platform that works inside databases via SQL commands. It is built with a modular approach, and in this talk we are going to focus on Lightwood, the stand-alone core component that performs machine learning automation on top of the PyTorch framework.

Lightwood automates model building into 5 stages: (1) classifying each feature into a "data type", (2) running statistical analyses on each column of a dataset, (3) fitting multiple models to normalize, tokenize, and generate embeddings for each feature, (4) deploying the embeddings to fit a final estimator, and (5) running an analysis on the final ensemble to evaluate it and generate a confidence model. It can generate quick "baseline" models to benchmark performance for any custom encoder representation of a data type and can also serve as scaffolding for investigating new hypotheses (architectures, optimizers, loss functions, hyperparameters, etc.).

We aim to present our benchmarks covering wide swaths of problem types and illustrate how Lightwood can be useful for researchers and engineers through a hands-on demo.

        + +

        + https://mindsdb.com +

        + +

        + Database & AI Accelerators +

        + + + +
        +
        + + + +
        + + PyTorch on Supercomputers Simulations and AI at Scale with SmartSim + +
        +
        Sam Partee , Alessandro Rigazzi, Mathew Ellis, Benjamin Rob
        +

        SmartSim is an open source library dedicated to enabling online analysis and Machine Learning (ML) for traditional High Performance Computing (HPC) simulations. Clients are provided in common HPC simulation languages, C/C++/Fortran, that enable simulations to perform inference requests in parallel on large HPC systems. SmartSim utilizes the Redis ecosystem to host and serve PyTorch models alongside simulations. We present a use case of SmartSim where a global ocean simulation, used in climate modeling, is augmented with a PyTorch model to resolve quantities of eddy kinetic energy within the simulation.

        + +

        + https://github.com/CrayLabs/SmartSim +

        + +

        + Database & AI Accelerators +

        +
        +
        + +
        +
        + + + +
        + + Model agnostic confidence estimation with conformal predictors for AutoML + +
        +
        Patricio Cerda-Mardini, Natasha Seelam
        +

Many domains leverage the extraordinary predictive performance of machine learning algorithms. However, there is an increasing need for transparency of these models in order to justify deploying them in applied settings. Developing trustworthy models is a great challenge, as they are usually optimized for accuracy, relegating the fit between the true and predicted distributions to the background [1]. This concept of obtaining predicted probability estimates that match the true likelihood is also known as calibration.

Contemporary ML models generally exhibit poor calibration. There are several methods that aim at producing calibrated ML models [2, 3]. Inductive conformal prediction (ICP) is a simple yet powerful framework to achieve this, offering strong guarantees about the error rates of any machine learning model [4]. ICP provides confidence scores and turns any point prediction into a prediction region through nonconformity measures, which indicate the degree of inherent strangeness a data point presents when compared to a calibration data split.

In this work, we discuss the integration of ICP with MindsDB --an open source AutoML framework-- successfully replacing its existing quantile loss approach for confidence estimation capabilities. Our contribution is threefold. First, we present a study on the effect of a "self-aware" neural network normalizer on the width of predicted region sizes (also known as efficiency) when compared to an unnormalized baseline. Our benchmarks consider results for over 30 datasets of varied domains with both categorical and numerical targets. Second, we propose an algorithm to dynamically determine the confidence level based on a target size for the predicted region, effectively prioritizing efficiency over a minimum error rate. Finally, we showcase the results of a nonconformity measure specifically tailored for small datasets.

References:
[1] Guo, C., Pleiss, G., Sun, Y., & Weinberger, K.Q. (2017). On Calibration of Modern Neural Networks. ArXiv, abs/1706.04599.
[2] Naeini, M., Cooper, G., & Hauskrecht, M. (2015). Obtaining Well Calibrated Probabilities Using Bayesian Binning. Proceedings of the AAAI Conference on Artificial Intelligence, 2015, 2901-2907.
[3] Maddox, W., Garipov, T., Izmailov, P., Vetrov, D., & Wilson, A. (2019). A Simple Baseline for Bayesian Uncertainty in Deep Learning. NeurIPS.
[4] Papadopoulos, H., Vovk, V., & Gammerman, A. (2007). Conformal Prediction with Neural Networks. 19th IEEE International Conference on Tools with Artificial Intelligence (ICTAI 2007), 2, 388-395.
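As a generic illustration of the ICP machinery discussed above (not MindsDB's implementation), a sketch for regression with absolute-error nonconformity scores and the usual finite-sample quantile correction:

import numpy as np

def icp_interval(calibration_residuals, y_pred_new, confidence=0.9):
    # Nonconformity scores are |y - y_hat| on a held-out calibration split.
    n = len(calibration_residuals)
    level = min(1.0, np.ceil((n + 1) * confidence) / n)
    q = np.quantile(calibration_residuals, level)
    return y_pred_new - q, y_pred_new + q

# Toy usage with synthetic calibration residuals.
rng = np.random.default_rng(0)
residuals = np.abs(rng.normal(0.0, 1.0, size=500))
lo, hi = icp_interval(residuals, y_pred_new=np.array([2.5]), confidence=0.9)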

        + +

        + https://mindsdb.com +

        + +

        + Database & AI Accelerators +

        +
        +
        + +
        +
        + + + +
        + + Enabling PyTorch on AMD Instinct™ GPUs with the AMD ROCm™ Open Software Platform + +
        +
        Derek Bouius
        +

        AMD Instinct GPUs are enabled in the upstream PyTorch repository via the ROCm open software platform. Users can now also easily download the installable Python package, built from the upstream PyTorch repository and hosted on pytorch.org. Notably, it includes support for distributed training across multiple GPUs as well as accelerated mixed precision training. AMD also provides hardware support for the PyTorch community build to help develop and maintain new features. This poster will highlight some of the work that has gone into enabling PyTorch support.

        + +

        + https://www.amd.com/rocm +

        + +

        + Database & AI Accelerators +

        +
        +
        + +
        +
        + + + +
        + + DeepSpeed: Shattering barriers of deep learning speed & scale + +
        +
        DeepSpeed Team, Microsoft Corporation
        +

        In the poster (and a talk during the breakout session), we will present three aspects of DeepSpeed (https://github.com/microsoft/DeepSpeed), a deep learning optimization library based on the PyTorch framework: 1) how we overcome the GPU memory barrier with ZeRO-powered data parallelism; 2) how we overcome the network bandwidth barrier with the 1-bit Adam and 1-bit LAMB compressed optimization algorithms; 3) how we overcome the usability barrier through integration with Azure ML, HuggingFace, and PyTorch Lightning.

        + +

        + +

        + +

        + Distributed Training +

        +
        +
        + +
        +
        + +
        + Dask PyTorch DDP: A new library bringing Dask parallelization to PyTorch training +
        +
        Stephanie Kirmer, Hugo Shi
        +

        We have developed a library that helps simplify the task of multi-machine parallel training for PyTorch models, bringing together the power of PyTorch DDP with Dask for parallelism on GPUs. Our poster describes the library and its core function, and demonstrates how the multi-machine training process works in practice.

        + +

        + https://github.com/saturncloud/dask-pytorch-ddp +

        + +

        + Distributed Training +

        +
        +
        + +
        +
        + + + +
        + + Optimising Physics Informed Neural Networks. + +
        +
        Vignesh Gopakumar
        +

        Solving PDEs with neural networks is often arduous, as it requires training towards a well-defined solution, i.e. the global minimum for a given combination of network architecture and objective function. For a family of complex PDEs, Physics Informed Neural Networks won't offer much over traditional numerical methods, as their global minima become more and more intractable. We propose a modified approach that hinges on continual and parametrised learning to create more general PINNs that can solve a variety of PDE scenarios rather than a single well-defined case. We believe this brings neural-network-based PDE solvers closer to parity with traditional numerical solvers.
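        For readers unfamiliar with the basic PINN recipe the poster builds on, the sketch below assembles a physics-informed residual loss for the 1D heat equation u_t = u_xx using autograd; the network size and collocation points are arbitrary, and the continual/parametrised extensions proposed above are not shown.

```python
# Minimal physics-informed loss sketch for the 1D heat equation u_t = u_xx.
# Boundary/initial-condition terms are omitted for brevity.
import torch

net = torch.nn.Sequential(
    torch.nn.Linear(2, 64), torch.nn.Tanh(),
    torch.nn.Linear(64, 64), torch.nn.Tanh(),
    torch.nn.Linear(64, 1),
)

def pde_residual(xt):
    xt = xt.requires_grad_(True)
    u = net(xt)
    grads = torch.autograd.grad(u.sum(), xt, create_graph=True)[0]
    u_x, u_t = grads[:, 0:1], grads[:, 1:2]
    u_xx = torch.autograd.grad(u_x.sum(), xt, create_graph=True)[0][:, 0:1]
    return u_t - u_xx          # residual of u_t - u_xx = 0

xt = torch.rand(256, 2)        # random collocation points (x, t) in [0, 1]^2
loss = pde_residual(xt).pow(2).mean()
loss.backward()
```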

        + +

        + +

        + +

        + Distributed Training +

        +
        +
        + +
        +
        + + + +
        + + FairScale-A general purpose modular PyTorch library for high performance and large scale training + +
        +
        Mandeep Baines, Shruti Bhosale, Vittorio Caggiano, Benjamin Lefaudeux, Vitaliy Liptchinsky, Naman Goyal, Siddhardth Goyal, Myle Ott, Sam Sheifer, Anjali Sridhar, Min Xu
        +

        FairScale is a library that extends basic PyTorch capabilities while adding new SOTA techniques for high-performance and large-scale training on one or multiple machines. FairScale makes the latest distributed training techniques available in the form of composable modules and easy-to-use APIs.

        Machine Learning (ML) training at scale has traditionally meant data parallelism, which reduces training time by using multiple devices to train on a larger batch size. Nevertheless, with the recent increase in ML model sizes, data parallelism is no longer enough to satisfy all "scaling" needs. FairScale provides several options to overcome some of these limitations.

        For training that is bottlenecked by memory (optimizer state, intermediate activations, parameters), FairScale provides APIs that implement optimizer, gradient, and parameter sharding. This allows users to train large models across devices in a more memory-efficient manner.

        To overcome the memory required for large models, FairScale provides various flavors of pipeline and model parallelism, an MoE (Mixture of Experts) layer, and Offload models. These methods perform computation only on shards of the model across multiple devices, with micro-batches of data to maximize device efficiency.

        FairScale also provides modules that help users scale batch size effectively without changing their existing learning-rate hyperparameter (AdaScale) and save memory with activation checkpointing of intermediate layers.

        FairScale has also been integrated into PyTorch Lightning, HuggingFace, FairSeq, VISSL, and MMF to enable users of those frameworks to take advantage of its features.

        + +

        + +

        + +

        + Distributed Training +

        +
        +
        + +
        +
        + + + +
        + + AdaptDL: An Open-Source Resource-Adaptive Deep Learning Training/Scheduling Framework + +
        +
        Aurick Qiao, Sang Keun Choe, Suhas Jayaram Subramanya, Willie Neiswanger, Qirong Ho, Hao Zhang, Gregory R. Ganger, Eric P. Xing
        +

        AdaptDL is an open-source framework and scheduling algorithm that directly optimizes cluster-wide training performance and resource utilization. By elastically re-scaling jobs, co-adapting batch sizes and learning rates, and avoiding network interference, AdaptDL improves shared-cluster training compared with alternative schedulers. AdaptDL can automatically determine the optimal number of resources for a job's needs and will efficiently add or remove resources dynamically to ensure the highest level of performance. The AdaptDL scheduler automatically figures out the most efficient number of GPUs to allocate to your job based on its scalability; when cluster load is low, your job can dynamically expand to take advantage of more GPUs. AdaptDL offers an easy-to-use API to make existing PyTorch training code elastic, with adaptive batch sizes and learning rates.

        Showcase: Distributed training and Data Loading

        + +

        + +

        + +

        + Distributed Training +

        +
        +
        + +
        +
        + +
        + Accelerate PyTorch large model training with ONNX Runtime: just add one line of code! +
        +
        Natalie Kershaw
        +

        As deep learning models, especially transformer models, get bigger and bigger, reducing training time becomes both a financial and an environmental imperative. ONNX Runtime can accelerate large-scale distributed training of PyTorch transformer models with a one-line code change (in addition to import statements ;-)). Adding in the DeepSpeed library improves training speed even more.

        With the new ORTModule API, you wrap an existing torch.nn.Module and we automatically: export the model as an ONNX computation graph; compile and optimize it with ONNX Runtime; and integrate it into your existing training script.

        In this poster, we demonstrate how to fine-tune a popular HuggingFace model and show the performance improvement on a multi-GPU cluster in the Azure Machine Learning cloud service.
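        A minimal sketch of the one-line wrap described above, assuming the torch-ort package that ships ORTModule is installed; the tiny linear model stands in for a real transformer, so treat the names as placeholders and check the current onnxruntime-training docs.

```python
# Hedged sketch: wrap an existing torch.nn.Module with ORTModule so ONNX
# Runtime handles export, graph optimization, and execution.
import torch
from torch_ort import ORTModule   # assumes the torch-ort package is installed

model = torch.nn.Linear(768, 2)   # stand-in for a HuggingFace transformer
model = ORTModule(model)          # the advertised one-line change

loss = model(torch.randn(8, 768)).sum()
loss.backward()                   # the rest of the training loop is unchanged
```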

        + +

        + https://aka.ms/pytorchort +

        + +

        + Distributed Training +

        +
        +
        + +
        +
        + + + +
        + + PyTorch/XLA with new Cloud TPU VMs and Profiler + +
        +
        Jack Cao, Daniel Sohn, Zak Stone, Shauheen Zahirazami
        +

        PyTorch/XLA enables users to train PyTorch models on XLA devices, including Cloud TPUs. Cloud TPU VMs now provide direct access to TPU host machines and hence offer much greater flexibility, in addition to making debugging easier and reducing data transfer overheads. PyTorch/XLA now has full support for this new architecture. A new profiling tool has also been developed to enable better profiling of PyTorch/XLA. These improvements not only make it much easier to develop models but also reduce the cost of large-scale PyTorch/XLA training runs on Cloud TPUs.
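        A minimal sketch of placing a model on an XLA device with torch_xla (assumes a Cloud TPU VM with the torch_xla package installed; the model and data are toy placeholders).

```python
import torch
import torch_xla.core.xla_model as xm

device = xm.xla_device()                     # acquire a TPU core as an XLA device
model = torch.nn.Linear(10, 10).to(device)
out = model(torch.randn(4, 10).to(device))
loss = out.sum()
loss.backward()
xm.mark_step()                               # cut and execute the pending XLA graph
```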

        + +

        + http://goo.gle/pt-xla-tpuvm-signup +

        + +

        + Distributed Training +

        +
        +
        + +
        +
        + + + +
        + + PyTorch Lightning: Deep Learning without the Boilerplate + +
        +
        Ari Bornstein
        +

        PyTorch Lightning reduces the engineering boilerplate and resources required to implement state-of-the-art AI. Organizing PyTorch code with Lightning enables seamless training on multiple GPUs, TPUs, and CPUs, as well as the use of difficult-to-implement best practices such as model sharding, 16-bit precision, and more, without any code changes. In this poster, we will use practical Lightning examples to demonstrate how to train deep learning models with less boilerplate.
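        A minimal LightningModule sketch; the commented Trainer flags are where the multi-device and 16-bit options mentioned above are switched on (values are illustrative).

```python
import torch
import pytorch_lightning as pl

class LitClassifier(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(28 * 28, 10)

    def training_step(self, batch, batch_idx):
        x, y = batch
        return torch.nn.functional.cross_entropy(self.layer(x), y)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)

# trainer = pl.Trainer(gpus=2, precision=16)     # scaling/precision via flags only
# trainer.fit(LitClassifier(), train_dataloader) # train_dataloader supplied by the user
```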

        + +

        + https://www.pytorchlightning.ai/ +

        + +

        + Frontend & Experiment Manager +

        +
        +
        + +
        +
        + + + +
        + + Accelerate PyTorch with IPEX and oneDNN using Intel BF16 Technology + +
        +
        Jiong Gong, Nikita Shustrov, Eikan Wang, Jianhui Li, Vitaly Fedyunin
        +

        Intel and Facebook collaborated to enable BF16, a first-class data type in PyTorch that is accelerated natively by 3rd Gen Intel® Xeon® Scalable processors. This poster introduces the latest software advancements added in the Intel Extension for PyTorch (IPEX), on top of PyTorch and the oneDNN (oneAPI Deep Neural Network) library, for ease of use and high-performance BF16 deep learning compute on CPU. With these advancements, we demonstrate the easy-to-use IPEX user-facing API, and we also showcase 1.55X-2.42X speed-ups with IPEX BF16 training over FP32 with stock PyTorch and 1.40X-4.26X speed-ups with IPEX BF16 inference over FP32 with stock PyTorch.

        + +

        + https://github.com/intel/intel-extension-for-pytorch +

        + +

        + Frontend & Experiment Manager +

        +
        +
        + +
        +
        + + + +
        + + TorchStudio, a machine learning studio software based on PyTorch + +
        +
        Robin Lobel
        +

        TorchStudio is a standalone software package based on PyTorch and LibTorch. It aims to simplify the creation, training, and iteration of PyTorch models. It runs locally on Windows, Ubuntu, and macOS. It can load, analyze, and explore PyTorch datasets from TorchVision or TorchAudio, or custom datasets with any number of inputs and outputs. PyTorch models can then be loaded or written from scratch, analyzed, and trained using local hardware. Trainings can be run simultaneously and compared to identify the best-performing models, which can then be exported as trained TorchScript or ONNX models.

        + +

        + https://torchstudio.ai/ +

        + +

        + Frontend & Experiment Manager +

        +
        +
        + +
        +
        + + + +
        + + Hydra Framework + +
        +
        Jieru Hu, Omry Yadan
        +

        Hydra is an open source framework for configuring and launching research Python applications. Key features:
        - Compose and override your config dynamically to get the perfect config for each run
        - Run on remote clusters like SLURM and AWS without code changes
        - Perform basic grid search and hyperparameter optimization without code changes
        - Command line tab completion for your dynamic config
        And more.
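        A minimal Hydra sketch, assuming a conf/config.yaml containing `lr: 0.001`; any value can then be overridden from the command line, e.g. `python train.py lr=0.01`.

```python
import hydra
from omegaconf import DictConfig

@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig) -> None:
    # cfg is composed from conf/config.yaml plus any CLI overrides.
    print(f"learning rate: {cfg.lr}")

if __name__ == "__main__":
    main()
```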

        + +

        + +

        + +

        + Frontend & Experiment Manager +

        +
        +
        + +
        +
        + + + +
        + + PyTorch-Ignite: training common things easy and the hard things possible + +
        +
        Victor Fomin, Sylvain Desroziers, Taras Savchyn
        +

        This poster intends to give a brief but illustrative overview of what PyTorch-Ignite can offer to deep learning enthusiasts, professionals, and researchers. Following the same philosophy as PyTorch, PyTorch-Ignite aims to keep it simple, flexible, and extensible, yet performant and scalable. Throughout this poster we introduce the basic concepts of PyTorch-Ignite, its API, and the features it offers. We assume that the reader is familiar with PyTorch.
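        A minimal PyTorch-Ignite sketch: an Engine wraps the training loop and event handlers attach logging; the toy model and data are placeholders.

```python
import torch
from ignite.engine import create_supervised_trainer, Events

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
criterion = torch.nn.CrossEntropyLoss()

trainer = create_supervised_trainer(model, optimizer, criterion)

@trainer.on(Events.EPOCH_COMPLETED)
def log_epoch(engine):
    # engine.state.output is the batch loss returned by the default update step.
    print(f"epoch {engine.state.epoch}: loss={engine.state.output:.4f}")

data = [(torch.randn(16, 10), torch.randint(0, 2, (16,))) for _ in range(8)]
trainer.run(data, max_epochs=2)
```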

        + +

        + +

        + +

        + Frontend & Experiment Manager +

        +
        +
        + +
        +
        + + + +
        + + Farabio - Deep Learning Toolkit for Biomedical Imaging + +
        +
        Sanzhar Askaruly, Nurbolat Aimakov, Alisher Iskakov, Hyewon Cho
        +

        Deep learning has recently transformed many aspects of industrial pipelines. Scientists involved in biomedical imaging research are also benefiting from the power of AI to tackle complex challenges. Although the academic community has widely accepted image processing tools such as scikit-image and ImageJ, there is still a need for a tool that integrates deep learning into biomedical image analysis. We propose a minimal but convenient Python package based on PyTorch, providing common deep learning models extended by flexible trainers and medical datasets.

        + +

        + https://github.com/tuttelikz/farabio +

        + +

        + Medical & Healthcare +

        +
        +
        + +
        +
        + + + +
        + + MONAI: A Domain Specialized Library for Healthcare Imaging + +
        +
        Michael Zephyr, Prerna Dogra, Richard Brown, Wenqi Li, Eric Kerfoot
        +

        Healthcare image analysis for both radiology and pathology is increasingly being addressed with deep-learning-based solutions. These applications have specific requirements to support various imaging modalities like MR, CT, ultrasound, digital pathology, etc. It is a substantial effort for researchers in the field to develop custom functionalities to handle these requirements. Consequently, there has been duplication of effort, and as a result, researchers have incompatible tools, which makes it hard to collaborate.

        MONAI stands for Medical Open Network for AI. Its mission is to accelerate the development of healthcare imaging solutions by providing domain-specialized building blocks and a common foundation for the community to converge in a native PyTorch paradigm.

        + +

        + https://monai.io/ +

        + +

        + Medical & Healthcare +

        +
        +
        + +
        +
        + + + +
        + + How theator Built a Continuous Training Framework to Scale Up Its Surgical Intelligence Platform + +
        +
        Shai Brown, Daniel Neimark, Maya Zohar, Omri Bar, Dotan Asselmann
        +

        Theator is re-imagining surgery with a Surgical Intelligence platform that leverages highly advanced AI, specifically machine learning and computer vision technology, to analyze every step, event, milestone, and critical junction of surgical procedures.

        Our platform analyzes lengthy surgical procedure videos and extracts meaningful information, providing surgeons with highlight reels of key moments in an operation, enhanced by annotations.

        As the team expanded, we realized that we were spending too much time manually running model training and focusing on DevOps tasks, and not enough time on core research.

        To address this, we built an automation framework composed of multiple training pipelines using PyTorch and ClearML. Our framework automates and manages our entire process, from model development to deployment to continuous training for model improvement.

        New data is now immediately processed and fed directly into training pipelines, speeding up workflow, minimizing human error, and freeing up our research team for more important tasks. This enables us to scale our ML operation and deliver better models for our end users.

        + +

        + +

        + +

        + Medical & Healthcare +

        +
        +
        + +
        +
        + + + +
        + + Q&Aid: A Conversation Agent Powered by PyTorch + +
        +
        Cebere Bogdan, Cebere Tudor, Manolache Andrei, Horia Paul-Ion
        +

        We present Q&Aid, a conversation agent that relies on a series of machine learning models to filter, label, and answer medical questions based on provided image and text inputs. Q&Aid simplifies the hospital backend logic by standardizing it into a Health Intel Provider (HIP). A HIP is a collection of models trained on local data that receives text and visual input, then filters, labels, and feeds the data to the right models, generating output for the aggregator. Each hospital is identified as a HIP holding custom models and labeling based on its own knowledge. The hospitals train and fine-tune their models, such as a Visual Question Answering (VQA) model, on private data (e.g. brain anomaly segmentation). We aggregate all the tasks that the hospitals can provide into a single chat app, offering the results to the user. When the chat ends, the transcript is forwarded to each hospital, with a doctor in charge of the final decision.

        + +

        + https://qrgo.page.link/d1fQk +

        + +

        + Medical & Healthcare +

        +
        +
        + +
        +
        + + + +
        + + Sleepbot: Multi-signal Sleep Stage Classifier AI for hospital and home + +
        +
        Jaden Hong, Kevin Tran, Tyler Lee, Paul Lee, Freddie Cha, Louis Jung, Dr. Jung Kyung Hong, Dr. In-Young Yoon, David Lee
        +

        Sleep disorders and insomnia are now regarded as a worldwide problem. Roughly 62% of adults worldwide feel that they don't sleep well. However, sleep is difficult to track, so it's not easy to get suitable treatment to improve your sleep quality. Currently, PSG (polysomnography) is the only way to evaluate sleep quality accurately, but it's expensive and often inaccurate due to the first-night effect.

        We propose a multi-signal sleep stage classifier for contactless sleep tracking: Sleepbot. By automating manual PSG reading and providing explainable analysis, Sleepbot opens a new possibility of applying sleep staging AI in both home and hospital settings. With sound recorded by a smartphone app and an RF-sensed signal measured by Asleep's non-contact sleep tracker, Sleepbot provides a clinical level of sleep stage classification.

        Sleepbot achieved 85.5% accuracy in 5-class classification (Wake, N1, N2, N3, REM) using PSG signals measured from 3,700 subjects and 77% accuracy in 3-class classification (Wake, Sleep, REM) using only sound data measured from 1,2000 subjects.

        + +

        + +

        + +

        + Medical & Healthcare +

        +
        +
        + +
        +
        + +
        + PyMDE: Minimum-Distortion Embedding +
        +
        Akshay Agrawal, Alnur Ali, Stephen Boyd
        +

        We present a unifying framework for the vector embedding problem: given a set of items and some known relationships between them, we seek a representation of the items by vectors, possibly subject to some constraints (e.g., requiring the vectors to have zero mean and identity covariance). We want the vectors associated with similar items to be near each other, and vectors associated with dissimilar items to not be near, measured in Euclidean distance. We formalize this by introducing distortion functions, defined for some pairs of the items. Our goal is to choose an embedding that minimizes the total distortion, subject to the constraints. We call this the minimum-distortion embedding (MDE) problem. The MDE framework generalizes many well-known embedding methods, such as PCA, the Laplacian eigenmap, multidimensional scaling, UMAP, and others, and also includes new types of embeddings.

        Our accompanying software library, PyMDE, makes it easy for users to specify and approximately solve MDE problems, enabling experimentation with well-known and custom embeddings alike. By making use of automatic differentiation and hardware acceleration via PyTorch, we are able to scale to very large embedding problems. We will showcase examples of embedding real datasets, including an academic co-authorship network, single-cell mRNA transcriptomes, US census data, and population genetics.
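        A minimal PyMDE sketch (API as documented by the project; the random data stands in for a real dataset): a neighbor-preserving MDE problem is set up and solved for a 2-D embedding.

```python
import torch
import pymde

items = torch.randn(1000, 50)                        # items as feature vectors
mde = pymde.preserve_neighbors(items, embedding_dim=2)
embedding = mde.embed()                              # (1000, 2) tensor of coordinates
```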

        + +

        + +

        + +

        + Medical & Healthcare +

        +
        +
        + +
        +
        + + + +
        + + TorchIO: Pre-Processing & Augmentation of Medical Images for Deep Learning Applications + +
        +
        Fernando Pérez-García, Rachel Sparks, Sébastien Ourselin
        +

        Processing of medical images such as MRI or CT presents unique challenges compared to RGB images typically used in computer vision. These include a lack of labels for large datasets, high computational costs, and metadata to describe the physical properties of voxels. Data augmentation is used to artificially increase the size of the training datasets. Training with image patches decreases the need for computational power. Spatial metadata needs to be carefully taken into account in order to ensure a correct alignment of volumes.

        We present TorchIO, an open-source Python library to enable efficient loading, preprocessing, augmentation and patch-based sampling of medical images for deep learning. TorchIO follows the style of PyTorch and integrates standard medical image processing libraries to efficiently process images during training of neural networks. TorchIO transforms can be composed, reproduced, traced and extended. We provide multiple generic preprocessing and augmentation operations as well as simulation of MRI-specific artifacts.

        TorchIO was developed to help researchers standardize medical image processing pipelines and allow them to focus on the deep learning experiments. It encourages open science, as it supports reproducibility and is version controlled so that the software can be cited precisely. Due to its modularity, the library is compatible with other frameworks for deep learning with medical images.
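        A minimal TorchIO sketch with a synthetic volume (real use would load NIfTI/DICOM images): transforms are composed and applied to a Subject, mixing generic preprocessing, spatial augmentation, and an MRI-specific artifact.

```python
import torch
import torchio as tio

subject = tio.Subject(
    mri=tio.ScalarImage(tensor=torch.rand(1, 64, 64, 64)),   # synthetic volume
)
transform = tio.Compose([
    tio.Resample(1.0),        # resample to 1 mm isotropic spacing
    tio.RandomAffine(),       # spatial augmentation
    tio.RandomMotion(),       # simulate an MRI motion artifact
])
transformed = transform(subject)
print(transformed.mri.shape)
```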

        + +

        + +

        + +

        + Medical & Healthcare +

        +
        +
        + +
        +
        + + + +
        + + Deep Learning Based Model to Predict Covid19 Patients' Outcomes on Admission + +
        +
        Laila Rasmy, Ziqian Xie, Degui Zhi
        +

        With the extensive use of electronic records and the availability of historical patient information, predictive models that can help identify patients at risk based on their history at an early stage can be a valuable adjunct to clinician judgment. Deep learning models can better predict patients' outcomes by consuming their medical history regardless of the length and complexity of such data. We used our Pytorch_EHR framework to train a model that predicts COVID-19 patients' health outcomes on admission. We used the Cerner Real-World COVID-19 (Q2) cohort, which included information for 117,496 COVID patients from 62 health systems. We used a cohort of 55,068 patients and defined our outcomes, including mortality, intubation, and hospitalization longer than 3 days, as binary outcomes. We fed the model all diagnoses, medications, laboratory results, and other clinical event information available before or on the first COVID-19 encounter admission date. We kept the data preprocessing to a minimum for convenience and practicality, relying on the embedding layer that learns feature representations from the large training set. Our model showed improved performance compared to baseline machine learning models like logistic regression (LR): AUROC of 89.5%, 90.6%, and 84.3% for in-hospital mortality, intubation, and hospitalization for more than 3 days, respectively, versus LR, which showed 82.8%, 83.2%, and 76.8%.

        + +

        + https://github.com/ZhiGroup/pytorch_ehr +

        + +

        + Medical & Healthcare +

        +
        +
        + +
        +
        + + + +
        + + Rolling out Transformers with TorchScript and Inferentia + +
        +
        Binghui Ouyang, Alexander O'Connor
        +

        While Transformers have brought unprecedented improvements in the accuracy and ease of developing NLP applications, their deployment remains challenging due to the large size of the models and their computational complexity. Indeed, until recently it was a widespread misconception that hosting high-performance transformer-based models was prohibitively expensive and technically challenging. Fortunately, recent advances in both the PyTorch ecosystem and in custom hardware for inference have created a world where models can be deployed in a cost-effective, scalable way, without the need for complex engineering.

        In this presentation, we discuss the use of PyTorch and AWS Inferentia to deploy production-scale models for chatbot intent classification, a particularly relevant and demanding scenario.

        Autodesk deploys a number of transformer-based models to solve customer support issues across our channels, and our ability to provide a flexible, high-quality machine learning solution is supported by leveraging cutting-edge technology such as transformer-based classification. Our chatbot, AVA, responds to tens of thousands of customer interactions monthly, and we are evolving our architecture to be supported by custom inference.

        We will discuss our experience piloting transformer-based intent models and present a workflow for going from data to deployment for similar projects.
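        A generic TorchScript sketch of the packaging step referenced above: a module is traced into a portable, Python-free artifact (the tiny classifier stands in for a transformer; compilation for Inferentia itself is a separate step not shown here).

```python
import torch

class IntentClassifier(torch.nn.Module):        # stand-in for a transformer encoder
    def __init__(self):
        super().__init__()
        self.encoder = torch.nn.Linear(128, 64)
        self.head = torch.nn.Linear(64, 8)

    def forward(self, x):
        return self.head(torch.relu(self.encoder(x)))

model = IntentClassifier().eval()
example = torch.randn(1, 128)
scripted = torch.jit.trace(model, example)       # static, self-contained graph
scripted.save("intent_classifier.pt")            # deployable artifact
```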

        + +

        + +

        + +

        + NLP & Multimodal, RL & Time Series +

        +
        +
        + +
        +
        + + + +
        + + PyTorchTS: PyTorch Probabilistic Time Series Forecasting Framework + +
        +
        Kashif Rasul
        +

        PyTorchTS is a PyTorch based Probabilistic Time Series forecasting framework that comes with state of the art univariate and multivariate models.

        + +

        + https://github.com/zalandoresearch/pytorch-ts +

        + +

        + NLP & Multimodal, RL & Time Series +

        +
        +
        + +
        +
        + + + +
        + + MMF: A modular framework for multimodal research + +
        +
        Sasha Sheng, Amanpreet Singh
        +

        MMF is designed from the ground up to let you focus on what matters, your model, by providing boilerplate code for distributed training, common datasets, and state-of-the-art pretrained baselines out of the box. MMF is built on top of PyTorch, bringing all of its power to your hands. MMF is not strongly opinionated, so you can use all of your PyTorch knowledge here. MMF is created to be easily extensible and composable. Through our modular design, you can use the specific components from MMF that you care about. Our configuration system allows MMF to easily adapt to your needs.

        + +

        + +

        + +

        + NLP & Multimodal, RL & Time Series +

        +
        +
        + +
        +
        + +
        + AllenNLP: An NLP research library for developing state-of-the-art models +
        +
        Dirk Groeneveld, Akshita Bhagia, Pete Walsh, Michael Schmitz
        +

        An Apache 2.0 NLP research library, built on PyTorch, for developing state-of-the-art deep learning models on a wide variety of linguistic tasks.

        + +

        + https://github.com/allenai/allennlp +

        + +

        + NLP & Multimodal, RL & Time Series +

        +
        +
        + +
        +
        + + + +
        + + Project Spock at Tubi: Understanding Content using Deep Learning for NLP + +
        +
        John Trenkle, Jaya Kawale & Tubi ML team
        +

        Tubi is one of the leading platforms providing free, high-quality streaming movies and TV shows to a worldwide audience. We embrace a data-driven approach and leverage advanced machine learning techniques using PyTorch to enhance our platform and business in any way we can. The Three Pillars of AVOD are the guiding principle for our work. The Pillars are:
        - Content: all the titles we maintain in our library
        - Audience: everyone who watches titles on Tubi
        - Advertising: ads shown to viewers on behalf of brands

        In this poster, we focus on the Content pillar, with more details on the various use cases, especially Content Understanding. Content is an important pillar of Tubi: to be successful, we need to look at existing titles and beyond what we already have, and attempt to understand all of the titles out in the wild and how they could benefit our platform. Content Understanding revolves around digesting a rich collection of 1st- and 3rd-party data in structured (metadata) and unstructured (text) forms and developing representations that capture the essence of those titles. Borrowing an analogy from linear algebra, we are attempting to project title vectors from the universe to our tubiverse with as much fidelity as possible, in order to ascertain potential value for each target use case. We will describe several techniques to understand content better using PyTorch.

        + +

        + +

        + +

        + NLP & Multimodal, RL & Time Series +

        +
        +
        + +
        +
        + + + +
        + + RL Based Performance Optimization of Deep Neural Networks + +
        +
        Benoit Steiner, Chris Cummins, Horace He, Hugh Leather
        +

        As the usage of machine learning techniques becomes ubiquitous, the efficient execution of neural networks is crucial to many applications. Frameworks such as Halide and TVM separate the algorithmic representation of the deep learning model from the schedule that determines its implementation. Finding good schedules, however, remains extremely challenging. Auto-tuning methods, which search the space of valid schedules and execute each candidate on the hardware, identify some of the best-performing schedules, but the search can take hours, hampering the productivity of deep learning practitioners. What is needed is a method that achieves similar performance without extensive search, delivering the needed efficiency quickly.

        Using PyTorch, we model the scheduling process as a sequence of optimization choices and implement a new technique to accurately predict the expected performance of a partial schedule, using an LSTM over carefully engineered features that describe each DNN operator and its current scheduling choices. Leveraging these predictions, we are able to make optimization decisions greedily and, without any executions on the target hardware, rapidly identify an efficient schedule. This technique finds schedules that improve the execution performance of deep neural networks by 2.6x over Halide and 1.5x over TVM. Moreover, our technique completes in seconds instead of hours, making it possible to include it as a new backend for PyTorch itself.

        + +

        + http://facebook.ai +

        + +

        + NLP & Multimodal, RL & Time Series +

        +
        +
        + +
        +
        + + + +
        + + A Data-Centric Framework for Composable NLP + +
        +
        Zhenghong Liu
        +

        Forte is an open-source toolkit for building Natural Language Processing workflows by assembling state-of-the-art NLP and ML technologies. The toolkit features composable pipelines, cross-task interaction, and adaptable data-model interfaces. The highly composable design allows users to build complex NLP pipelines for a wide range of tasks, including document retrieval, information extraction, and text generation, by combining existing toolkits or customized PyTorch models. The cross-task interaction ability allows developers to utilize the results from individual tasks to make informed decisions. The data-model interface helps developers focus on building reusable PyTorch models by abstracting out domain and preprocessing details. We show that Forte can be used to build complex pipelines, and the resulting pipelines can be easily adapted to different domains and tasks with small changes in the code.

        + +

        + https://github.com/asyml/forte +

        + +

        + NLP & Multimodal, RL & Time Series +

        +
        +
        + +
        +
        + + + +
        + + Environments and Baselines for Multitask Reinforcement Learning + +
        +
        Shagun Sodhani, Amy Zhang, Ludovic Denoyer, Pierre-Alexandre Kamienny, Olivier Delalleau
        +

        The two key components in a multi-task RL codebase are (i) multi-task RL algorithms and (ii) multi-task RL environments. We develop open-source libraries for both components. [MTRL](https://github.com/facebookresearch/mtrl) provides components to implement multi-task RL algorithms, and [MTEnv](https://github.com/facebookresearch/mtenv) is a library to interface with existing multi-task RL environments and create new ones.

        MTRL has two building blocks: (i) a single-task policy and (ii) components to augment the single-task policy for the multi-task setup. The ideal workflow is to start with a base policy and add multi-task components as they seem fit. MTRL enables algorithms like GradNorm, Distral, HiPBMDP, PCGrad, Soft Modularization, etc.

        MTEnv is an effort to standardize multi-task RL environments and provide better benchmarks. We extend the Gym API to support multiple tasks, with two guiding principles: (i) make minimal changes to the Gym interface (which the community is very familiar with) and (ii) make it easy to port existing environments to MTEnv. Additionally, we provide a collection of commonly used multi-task RL environments (Acrobot, Cartpole, a multitask variant of the DeepMind Control Suite, Meta-World, Multi-armed Bandit, etc.). RL practitioners can combine their own environments with the MTEnv wrappers to add multi-task support with a small code change.

        MTRL and MTEnv are used in several ongoing/published works at FAIR.

        + +

        + http://qr.w69b.com/g/tGZSFw33G +

        + +

        + NLP & Multimodal, RL & Time Series +

        +
        +
        + +
        +
        + + + +
        + + The Hugging Face Ecosystem + +
        +
        Lysandre Debut, Sylvain Gugger, Quentin Lhoest 
        +

        Transfer learning has become the norm for obtaining state-of-the-art results in NLP. Hugging Face provides you with tools to help you at every step along the way:

        - A free git-based shared hub with more than 7,500 PyTorch checkpoints and more than 800 NLP datasets.
        - The 🤗 Datasets library, to easily download, manipulate, and prepare datasets.
        - The 🤗 Tokenizers library, providing ultra-fast tokenizers backed by Rust that convert text into PyTorch tensors.
        - The 🤗 Transformers library, providing more than 45 PyTorch implementations of Transformer architectures as simple nn.Modules as well as a training API.
        - The 🤗 Accelerate library, a non-intrusive API that allows you to run your raw training loop on any distributed setup.

        The pipeline is then a simple six-step process: select a pretrained model from the hub, handle the data with Datasets, tokenize the text with Tokenizers, load the model with Transformers, train it with the Trainer or your own loop powered by Accelerate, then share your results with the community on the hub.
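        A condensed sketch of the six-step flow above using the Datasets and Transformers libraries (model and dataset names are illustrative).

```python
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

dataset = load_dataset("imdb")                                        # step 2: data
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")  # step 3: tokenize
model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased", num_labels=2)                          # steps 1 & 4

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, padding="max_length")

encoded = dataset.map(tokenize, batched=True)

trainer = Trainer(                                                    # step 5: train
    model=model,
    args=TrainingArguments(output_dir="out", num_train_epochs=1),
    train_dataset=encoded["train"].shuffle(seed=42).select(range(1000)),
)
trainer.train()                     # step 6: share the result on the hub afterwards
```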

        + +

        + https://huggingface.co/models +

        + +

        + NLP & Multimodal, RL & Time Series +

        +
        +
        + +
        +
        + + + +
        + +  Asteroid: the Pytorch-based Audio Source Separation Toolkit for Researchers + +
        +
        Manuel Pariente, Samuele Cornell, Jonas Haag, Joris Cosentino, Michel Olvera, Fabian-Robert Stöter, Efthymios Tzinis
        +

        Asteroid is an audio source separation toolkit built with PyTorch and PyTorch-Lightning. Inspired by the most successful neural source separation systems, it provides all the neural building blocks required to build such a system. To improve reproducibility, recipes on common audio source separation datasets are provided, including all the steps from data download/preparation through training to evaluation, as well as many current state-of-the-art DNN models. Asteroid exposes all levels of granularity to the user, from simple layers to complete ready-to-use models. Our pretrained models are hosted on the asteroid-models community in Zenodo and on the Hugging Face model Hub. Loading and using pretrained models is trivial, and sharing them is also made easy with asteroid's CLI.

        + +

        + https://asteroid-team.github.io/ +

        + +

        + NLP & Multimodal, RL & Time Series +

        +
        +
        + +
        +
        + + + +
        + + rlstructures: A Lightweight Python Library for Reinforcement Learning Research + +
        +
        Ludovic Denoyer, Danielle Rothermel, Xavier Martinet
        +

        RLStructures is a lightweight Python library that provides simple APIs as well as data structures that make as few assumptions as possible about the structure of your agent or your task, while allowing for transparently executing multiple policies on multiple environments in parallel (incl. multiple GPUs). It thus facilitates the implementation of RL algorithms while avoiding complex abstractions.

        + +

        + +

        + +

        + NLP & Multimodal, RL & Time Series +

        +
        +
        + +
        +
        + + + +
        + + MBRL-Lib: a PyTorch toolbox for model-based reinforcement learning research + +
        +
        Luis Pineda, Brandon Amos, Amy Zhang, Nathan O. Lambert, Roberto Calandra
        +

        Model-based reinforcement learning (MBRL) is an active area of research with enormous potential. In contrast to model-free RL, MBRL algorithms solve tasks by learning a predictive model of the task dynamics and using this model to predict the future and facilitate decision making. Many researchers have argued that MBRL can result in lower sample complexity, better generalization, and safer and more interpretable decisions. However, despite the surge in popularity and great potential of MBRL, there is currently no widely accepted library for facilitating research in this area. Since MBRL methods often involve the interplay of complex components such as probabilistic ensembles, latent variable models, planning algorithms, and even model-free methods, the lack of such a library raises the entry bar to the field and slows down research efforts. In this work we aim to solve this problem by introducing MBRL-Lib, a modular PyTorch toolbox specifically designed for facilitating research on model-based reinforcement learning. MBRL-Lib provides interchangeable options for training dynamics models and running planning algorithms, which can then be used in a mix-and-match fashion to create novel MBRL methods. The library also provides a set of utility functions to run common MBRL tasks, as well as a set of diagnostic tools to identify potential issues while training dynamics models and control algorithms.

        + +

        + https://github.com/facebookresearch/mbrl-lib +

        + +

        + NLP & Multimodal, RL & Time Series +

        +
        +
        + +
        +
        + + + +
        + + Introducing New PyTorch Profiler + +
        +
        Geeta Chauhan, Gisle Dankel, Elena Neroslavaskaya
        +

        Analyzing and improving large-scale deep learning model performance is an ongoing challenge that continues to grow in importance as model sizes increase. Microsoft and Facebook collaborated to create a native PyTorch performance debugging tool called PyTorch Profiler. The profiler builds on the PyTorch autograd profiler foundation, adds a new high-fidelity GPU profiling engine, and provides an out-of-the-box bottleneck analysis tool in TensorBoard. The new profiler delivers the simplest experience available to date: users can profile their models without installing any additional packages and see results immediately in TensorBoard. Until today, beginner users of PyTorch may not have attempted to profile their models due to the task's complexity. With the new bottleneck analysis tool, they will find profiling easy and accessible. Experienced users will be delighted by the detailed trace views, which illustrate GPU kernel execution events and their relationship to the PyTorch operations. Come learn how to profile your PyTorch models using this delightfully simple new tool.
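        A minimal torch.profiler sketch (PyTorch 1.8.1+): the trace is written for TensorBoard's PyTorch Profiler plugin, and no extra profiling package is needed.

```python
import torch
from torch.profiler import (ProfilerActivity, profile, schedule,
                            tensorboard_trace_handler)

model = torch.nn.Linear(512, 512)
inputs = torch.randn(64, 512)

with profile(
    activities=[ProfilerActivity.CPU],            # add ProfilerActivity.CUDA on GPU
    schedule=schedule(wait=1, warmup=1, active=3),
    on_trace_ready=tensorboard_trace_handler("./log"),
) as prof:
    for _ in range(5):
        model(inputs)
        prof.step()                               # advance the profiling schedule
```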

        + +

        + https://pytorch.org/blog/introducing-pytorch-profiler-the-new-and-improved-performance-tool +

        + +

        + Performance & Profiler +

        +
        +
        + +
        +
        + + + +
        + + TRTorch: A Compiler for TorchScript Targeting NVIDIA GPUs with TensorRT + +
        +
        Naren Dasan
        +

        For experimentation and the development of machine learning models, few tools are as approachable as PyTorch. However, when moving from research to production, some of the features that make PyTorch great for development make it hard to deploy. With the introduction of TorchScript, PyTorch has solid tooling for addressing some of the problems of deploying PyTorch models. TorchScript removes the dependency on Python and produces portable, self-contained, static representations of code and weights. But in addition to portability, users also look to optimize performance in deployment. When deploying on NVIDIA GPUs, TensorRT, NVIDIA's deep learning optimizer, provides the capability to maximize the performance of workloads by tuning the execution of models for specific target hardware. TensorRT also provides tooling for further optimization through mixed and reduced precision execution and post-training quantization (PTQ). We present TRTorch, a compiler for PyTorch and TorchScript targeting NVIDIA GPUs, which combines the usability of PyTorch with the performance of TensorRT and allows users to fully optimize their inference workloads without leaving the PyTorch ecosystem. It also simplifies complex optimizations like PTQ by leveraging common PyTorch tooling. TRTorch can be used directly from PyTorch as a TorchScript backend, embedded in an application, or used from the command line to easily increase the performance of inference applications.

        + +

        + https://nvidia.github.io/TRTorch/ +

        + +

        + Performance & Profiler +

        +
        +
        + +
        +
        + + + +
        + + WeightWatcher: A Diagnostic Tool for DNNs + +
        +
        Charles H. Martin
        +

        WeightWatcher (WW) is an open-source diagnostic tool for analyzing Deep Neural Networks (DNNs) without needing access to training or even test data. It can be used to: analyze pre-trained PyTorch models; inspect models that are difficult to train; gauge improvements in model performance; predict test accuracies across different models; and detect potential problems when compressing or fine-tuning pretrained models.

        WeightWatcher is based on theoretical research (done jointly with UC Berkeley) into "Why Deep Learning Works", using ideas from Random Matrix Theory (RMT), Statistical Mechanics, and Strongly Correlated Systems.

        + +

        + +

        + +

        + Performance & Profiler +

        +
        +
        + +
        +
        + +
        + Constrained Optimization in PyTorch 1.9 Through Parametrizations +
        +
        Mario Lezcano-Casado
        +

        "This poster presents the ""parametrizations"" feature that will be added to PyTorch in 1.9.0. +This feature allows for a simple implementation of methods like pruning, weight_normalization or spectral_normalization. +More generally, it implements a way to have ""computed parameters"". This means that we replace a parameter `weight` in a layer with `f(weight)`, where `f` is an arbitrary module. In other words, after putting a parametrization `f` on `layer.weight`, `layer.weight` will return `f(weight)`. +They implement a caching system, so that the value `f(weight)` is computed just once during the forward pass. +A module that implements a parametrisation may also have a `right_inverse` method. If this method is present, it is possible to assign to a parametrised tensor. This is useful when initialising a parametrised tensor. +This feature can be seen as a first step towards invertible modules. In particular, it may also help making distributions first-class citizens in PyTorch. +Parametrisations also allows for a simple implementation of constrained optimisation. From this perspective, parametrisation maps an unconstrained tensor to a constrained space such as the space of orthogonal matrices, SPD matrices, low-rank matrices... This approach is implemented in the library GeoTorch (https://github.com/Lezcano/geotorch/)."

        + +

        + +

        + +

        + Performance & Profiler +

        +
        +
        + +
        +
        + + + +
        + + Distributed Pytorch with Ray + +
        +
        Richard Liaw, Kai Fricke, Amog Kamsetty, Michael Galarnyk
        +

        Ray is a popular framework for distributed Python that can be paired with PyTorch to rapidly scale machine learning applications. Ray contains a large ecosystem of applications and libraries that leverage and integrate with PyTorch. This includes Ray Tune, a Python library for experiment execution and hyperparameter tuning at any scale; RLlib, a state-of-the-art library for reinforcement learning; and Ray Serve, a library for scalable model serving. Together, Ray and PyTorch are becoming the core foundation for the next generation of production machine learning platforms.
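        A minimal Ray Tune sketch (Ray 1.x-era API; the training function and search space are toy placeholders) showing how a PyTorch loop is launched over a small hyperparameter grid.

```python
import torch
from ray import tune

def train_fn(config):
    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=config["lr"])
    for _ in range(10):
        loss = model(torch.randn(32, 10)).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    tune.report(loss=loss.item())          # report the final loss to Tune

analysis = tune.run(train_fn, config={"lr": tune.grid_search([0.01, 0.1])})
print(analysis.get_best_config(metric="loss", mode="min"))
```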

        + +

        + https://ray.io/ +

        + +

        + Platforms & Ops & Tools +

        +
        +
        + +
        +
        + + + +
        + + Avalanche: an End-to-End Library for Continual Learning based on PyTorch + +
        +
        Vincenzo Lomonaco, Lorenzo Pellegrini, Andrea Cossu, Antonio Carta, Gabriele Graffieti
        +

        Learning continually from non-stationary data streams is a long-sought goal of machine learning research. Recently, we have witnessed a renewed and fast-growing interest in Continual Learning, especially within the deep learning community. However, algorithmic solutions are often difficult to re-implement, evaluate, and port across different settings, where even results on standard benchmarks are hard to reproduce. In this work, we propose an open-source, end-to-end library for continual learning based on PyTorch that may provide a shared and collaborative code base for fast prototyping, training, and reproducible evaluation of continual learning algorithms.

        + +

        + https://avalanche.continualai.org +

        + +

        + Platforms & Ops & Tools +

        +
        +
        + +
        +
        + + + +
        + + PyTorch on IBM Z and LinuxONE (s390x) + +
        +
        Hong Xu
        +

        IBM Z is a hardware product line for mission-critical applications, such as finance and health applications. It employs its own CPU architecture, which PyTorch does not officially support. In this poster, we discuss why it is important to support PyTorch on Z. Then, we show our prebuilt minimal PyTorch package for IBM Z. Finally, we demonstrate our continuing commitment to make more PyTorch features available on IBM Z.

        + +

        + https://codait.github.io/pytorch-on-z +

        + +

        + Platforms & Ops & Tools +

        +
        +
        + +
        +
        + + + +
        + + The Fundamentals of MLOps for R&D: Orchestration, Automation, Reproducibility + +
        +
        Dr. Ariel Biller
        +

        Both from sanity considerations and the productivity perspective, Data Scientists, ML engineers, graduate students, and other research-facing roles are all starting to adopt best practices from production-grade MLOps.

        However, most toolchains come with a hefty price of extra code and maintenance, which reduces the actual time available for R&D. We will show an alternative approach using ClearML, the open-source MLOps solution.

        In this "best-practices" poster, we will overview the "must-haves" of R&D MLOps: orchestration, automation, and reproducibility. These enable easy remote execution through magically reproducible setups and even custom, reusable, bottom-up pipelines.

        We will take a single example and schematically transform it from the "as downloaded from GitHub" stage to a fully-fledged, scalable, version-controlled, parameterizable R&D pipeline. We will measure the number of changes needed to the codebase and provide evidence of real low-cost integration. All code, logs, and metrics will be available as supporting information.

        + +

        + +

        + +

        + Platforms & Ops & Tools +

        +
        +
        + +
        +
        + + + +
        + + FairTorch: Aspiring to Mitigate the Unfairness of Machine Learning Models + +
        +
        Masashi Sode, Akihiko Fukuchi, Yoki Yabe, Yasufumi Nakata
        +

        Is your machine learning model fair enough to be used in your system? What if a recruiting AI discriminates on gender and race? What if the accuracy of medical AI depends on a person's annual income or on the GDP of the country where it is used? Today's AI has the potential to cause such problems. In recent years, fairness in machine learning has received increasing attention. If current machine learning models used for decision making may cause unfair discrimination, developing a fair machine learning model is an important goal in many areas, such as medicine, employment, and politics. Despite the importance of this goal to society, as of 2020 there was no PyTorch project incorporating fairness into a machine learning model. To solve this problem, we created FairTorch at the PyTorch Summer Hackathon 2020.

        FairTorch provides a tool to mitigate the unfairness of machine learning models. A unique feature of our tool is that it allows you to add a fairness constraint to your model by adding only a few lines of code, using the fairness criteria provided in the library.

        + +

        + https://github.com/wbawakate/fairtorch +

        + +

        + Platforms & Ops & Tools +

        +
        +
        + +
        +
        + + + +
        + + TorchDrift: Drift Detection for PyTorch + +
        +
        Thomas Viehmann, Luca Antiga
        +

        When machine learning models are deployed to solve a given task, a crucial question is whether they are actually able to perform as expected. TorchDrift addresses one aspect of the answer, namely drift detection, or whether the information flowing through our models, probed at the input, output, or somewhere in between, is still consistent with what they were trained and evaluated on. In a nutshell, TorchDrift is designed to be plugged into PyTorch models and check whether they are operating within spec.

        TorchDrift's principles apply PyTorch's motto _from research to production_ to drift detection: we provide a library of methods that can be used as baselines or building blocks for drift detection research, and we provide practitioners deploying PyTorch models in production with up-to-date methods and educational material for building the necessary statistical background. Here we introduce TorchDrift with an example illustrating the underlying two-sample tests. We show how TorchDrift can be integrated into high-performance runtimes such as TorchServe or RedisAI to enable drift detection in real-world applications thanks to the PyTorch JIT.
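        The sketch below shows the kind of kernel two-sample statistic such drift detectors are built on, written in plain PyTorch rather than TorchDrift's own API; the features and the shift are synthetic.

```python
import torch

def rbf_mmd2(x, y, sigma=1.0):
    """Biased MMD^2 estimate between two feature batches with an RBF kernel."""
    def k(a, b):
        return torch.exp(-torch.cdist(a, b).pow(2) / (2 * sigma ** 2))
    return k(x, x).mean() + k(y, y).mean() - 2 * k(x, y).mean()

reference = torch.randn(256, 32)             # features probed during training
production = torch.randn(256, 32) + 0.5      # shifted features seen in production
print(rbf_mmd2(reference, production))       # large values suggest drift
```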

        + +

        + https://torchdrift.org/ +

        + +

        + Platforms & Ops & Tools +

        +
        +
        + +
        +
        + + + +
        + + Ouroboros: MLOps for Automated Driving + +
        +
        Quincy Chen, Arjun Bhargava, Sudeep Pillai, Marcus Pan, Chao Fang, Chris Ochoa, Adrien Gaidon, Kuan-Hui Lee, Wolfram Burgard
        +

        Modern machine learning for autonomous vehicles requires a fundamentally different infrastructure and production lifecycle from its standard software continuous-integration/continuous-deployment counterparts. At Toyota Research Institute (TRI), we have developed Ouroboros, a modern ML platform that supports the end-to-end lifecycle of all ML models delivered to TRI's autonomous vehicle fleets. We envision that all ML models delivered to our fleet undergo a systematic and rigorous treatment. Ouroboros delivers several essential features, including:
        a. ML dataset governance and infrastructure-as-code that ensure the traceability, reproducibility, standardization, and fairness of all ML datasets and models procedurally generated and delivered to the TRI fleet;
        b. Unified ML dataset and model management: a unified and streamlined workflow for ML dataset curation, label management, and model development that supports several key ML models delivered to the TRI fleet today;
        c. A large-scale multi-task, multi-modal dataset for automated driving that supports the development of various models today, including 3D object detection, 2D object detection, 2D BeVFlow, and Panoptic Segmentation;
        d. Orchestrated ML workflows to stand up scalable ML applications such as push-button re-training solutions, ML CI/CD pipelines, dataset curation workflows, and auto-labelling pipelines, leveraging the most up-to-date cloud tools available.

        Along their lifecycles, these features ensure strong governance for building reusable, reproducible, robust, traceable, and fair ML models for the production driving setting. By following the best MLOps practices, we expect our platform to lay the foundation for continuous life-long learning in our autonomous vehicle fleets and accelerate the transition from research to production.

        + +

        + https://github.com/TRI-ML +

        + +

        + Platforms & Ops & Tools +

        +
        +
        + +
        +
        + + + +
        + + carefree-learn: Tabular Datasets ❀ PyTorch + +
        +
        Yujian He
        +

        carefree-learn makes PyTorch accessible to people who are familiar with machine learning but not necessarily with PyTorch. Because all the pre-processing and post-processing is already implemented under the hood, users can focus on implementing the core machine learning algorithms/models with PyTorch and test them on various datasets. Because the whole structure is carefully designed, users can easily customize every block in the pipeline, and can also combine the implemented blocks to construct new models without effort. Thanks to carefully made abstractions, users can adapt it to their specific downstream tasks, such as quantitative trading (in fact I've already implemented one for my company and it works pretty well XD). carefree-learn handles distributed training carefully, so users can either run multiple tasks at the same time or run a huge model with DDP in one line of code. carefree-learn also integrates with mlflow and supports exporting to ONNX, which means it is ready for production to some extent.

        + +

        + +

        + +

        + Platforms & Ops & Tools +

        +
        +
        + +
        +
        + + + +
        + + OpenMMLab: An Open-Source Algorithm Platform for Computer Vision + +
        +
        Wenwei Zhang
        +

        The OpenMMLab project builds open-source toolboxes for Artificial Intelligence (AI). It aims to 1) provide high-quality codebases to reduce the difficulty of algorithm reimplementation; 2) provide a complete research platform to accelerate research production; and 3) shorten the gap between research and industrial applications. Based on PyTorch, OpenMMLab develops MMCV to provide unified abstract training APIs and common utilities, which serve as a foundation for 15+ toolboxes and 40+ datasets.

        Since the initial release in October 2018, OpenMMLab has released 15+ toolboxes that cover 10+ directions, implement 100+ algorithms, and contain 1000+ pre-trained models. With tighter collaboration with the community, OpenMMLab will release more toolboxes with more flexible and easy-to-use training frameworks in the future.

        + +

        + https://openmmlab.com/ +

        + +

        + Platforms & Ops & Tools +

        +
        +
        + +
        +
        + + + +
        + + Catalyst – Accelerated deep learning R&D + +
        +
        Sergey Kolesnikov
        +

        For the last three years, Catalyst-Team and collaborators have been working on Catalyst, a high-level PyTorch framework for Deep Learning research and development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. You get metrics, model checkpointing, advanced logging, and distributed training support without the boilerplate and low-level bugs.
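        To give a feel for the workflow (not part of the original abstract), here is a minimal hedged sketch of a Catalyst-style run; the runner class and argument names follow the public docs at the time of writing, and the expected batch format may vary between Catalyst versions.

```python
# Hedged sketch: a minimal Catalyst-style training run on toy data.
# Exact runner configuration and batch-format handling may differ across versions.
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl

# Toy regression data and model
X, y = torch.randn(256, 8), torch.randn(256, 1)
loaders = {
    "train": DataLoader(TensorDataset(X[:200], y[:200]), batch_size=32),
    "valid": DataLoader(TensorDataset(X[200:], y[200:]), batch_size=32),
}
model = nn.Linear(8, 1)

runner = dl.SupervisedRunner()  # wires up the train loop, logging, checkpointing
runner.train(
    model=model,
    criterion=nn.MSELoss(),
    optimizer=torch.optim.Adam(model.parameters(), lr=1e-3),
    loaders=loaders,
    num_epochs=2,
    verbose=True,
)
```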

        + +

        + https://catalyst-team.com +

        + +

        + Platforms & Ops & Tools +

        +
        +
        + +
        +
        + + + +
        + + High-fidelity performance metrics for generative models in PyTorch + +
        +
        Anton Obukhov
        +

        Evaluation of generative models such as GANs is an important part of deep learning research. In 2D image generation, three approaches have become widespread: Inception Score, Fréchet Inception Distance, and Kernel Inception Distance. Despite having a clear mathematical and algorithmic description, these metrics were initially implemented in TensorFlow and inherited a few properties of the framework itself, such as a specific implementation of the interpolation function. These design decisions were effectively baked into the evaluation protocol and became an inherent part of the specification of the metrics. As a result, researchers wishing to compare against the state of the art in generative modeling are forced to perform an evaluation using the original metric authors' codebases. Reimplementations of the metrics in PyTorch and other frameworks exist, but they do not provide a proper level of fidelity, making them unsuitable for reporting results and comparing them to other methods. This software aims to provide epsilon-exact implementations of the said metrics in PyTorch and to remove the inconveniences associated with generative model evaluation and development. All the evaluation pipeline steps are correctly tested, with relative errors and sources of remaining non-determinism summarized in sections below. +TL;DR: fast and reliable GAN evaluation in PyTorch
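        As a hedged illustration of the intended usage (not from the poster itself), the snippet below sketches how the library's calculate_metrics entry point is typically called on two image folders; the folder paths are placeholders.

```python
# Hedged sketch: computing IS/FID/KID with torch-fidelity between two image folders.
# Paths are placeholders; argument names follow the documented calculate_metrics API
# and may change between releases.
import torch_fidelity

metrics = torch_fidelity.calculate_metrics(
    input1="path/to/generated_images",   # e.g. samples from a GAN
    input2="path/to/reference_images",   # e.g. the real dataset
    cuda=True,
    isc=True,   # Inception Score
    fid=True,   # Fréchet Inception Distance
    kid=True,   # Kernel Inception Distance
)
print(metrics)  # dict mapping metric names to values
```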

        + +

        + https://github.com/toshas/torch-fidelity +

        + +

        + Platforms & Ops & Tools +

        +
        +
        + +
        +
        + + + +
        + + Using Satellite Imagery to Identify Oceanic Oil Pollution + +
        +
        Jona Raphael (jona@skytruth.org), Ben Eggleston, Ryan Covington, Tatianna Evanisko, John Amos
        +

        Operational oil discharges from ships, also known as "bilge dumping," have been identified as a major source of petroleum products entering our oceans, cumulatively exceeding the largest oil spills, such as the Exxon Valdez and Deepwater Horizon spills, even when considered over short time spans. However, we still don't have a good estimate of +● How much oil is being discharged; +● Where the discharge is happening; +● Who the responsible vessels are. +This makes it difficult to prevent and effectively respond to oil pollution that can damage our marine and coastal environments and economies that depend on them. + +In this poster we will share SkyTruth's recent work to address these gaps using machine learning tools to detect oil pollution events and identify the responsible vessels when possible. We use a convolutional neural network (CNN) in a ResNet-34 architecture to perform pixel segmentation on all incoming Sentinel-1 synthetic aperture radar (SAR) imagery to classify slicks. Despite the satellites' incomplete oceanic coverage, we have been detecting an average of 135 vessel slicks per month, and have identified several geographic hotspots where oily discharges are occurring regularly. For the images that capture a vessel in the act of discharging oil, we rely on an Automatic Identification System (AIS) database to extract details about the ships, including vessel type and flag state. We will share our experience +● Making sufficient training data from inherently sparse satellite image datasets; +● Building a computer vision model using PyTorch and fastai; +● Fully automating the process in the Amazon Web Services (AWS) cloud. +The application has been running continuously since August 2020, has processed over 380,000 Sentinel-1 images, and has populated a database with more than 1100 high-confidence slicks from vessels. We will be discussing preliminary results from this dataset and remaining challenges to be overcome. +Learn more at https://skytruth.org/bilge-dumping/

        + +

        + +

        + +

        + Vision +

        +
        +
        + +
        +
        + + + +
        + + UPIT: A fastai Package for Unpaired Image-to-Image Translation + +
        +
        Tanishq Abraham
        +

        Unpaired image-to-image translation algorithms have been used for various computer vision tasks like style transfer and domain adaption. Such algorithms are highly attractive because they alleviate the need for the collection of paired datasets. In this poster, we demonstrate UPIT, a novel fastai/PyTorch package (built with nbdev) for unpaired image-to-image translation. It implements various state-of-the-art unpaired image-to-image translation algorithms such as CycleGAN, DualGAN, UNIT, and more. It enables simple training and inference on unpaired datasets. It also comes with implementations of commonly used metrics like FID, KID, and LPIPS. It also comes with Weights-and-Biases integration for easy experiment tracking. Since it is built on top of fastai and PyTorch, it comes with support for mixed-precision and multi-GPU training. It is highly flexible, and custom dataset types, models, and metrics can be used as well. With UPIT, training and applying unpaired image-to-image translation only takes a few lines of code.

        + +

        + https://github.com/tmabraham/UPIT +

        + +

        + Vision +

        +
        +
        + +
        +
        + + + +
        + + PyTorchVideo: A Deep Learning Library for Video Understanding + +
        +
        Aaron Adcock, Bo Xiong, Christoph Feichtenhofer, Haoqi Fan, Heng Wang, Kalyan Vasudev Alwala, Matt Feiszli, Tullie Murrell, Wan-Yen Lo, Yanghao Li, Yilei Li, Zhicheng Yan
        +

        PyTorchVideo is the new Facebook AI deep learning library for video understanding research. It contains a variety of state-of-the-art pretrained video models, datasets, augmentations, and tools for video understanding. PyTorchVideo also provides efficient video components and accelerated inference on mobile devices.
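        For orientation only, a hedged sketch of loading one of the pretrained models via torch.hub; the hub entry name ("slow_r50") and input clip shape follow the public tutorials and may differ between releases.

```python
# Hedged sketch: loading a pretrained video classification model from the
# PyTorchVideo hub and running it on a dummy clip.
import torch

model = torch.hub.load("facebookresearch/pytorchvideo", "slow_r50", pretrained=True)
model.eval()

# Dummy input: batch of 1 clip, 3 channels, 8 frames, 256x256 pixels
clip = torch.randn(1, 3, 8, 256, 256)
with torch.no_grad():
    logits = model(clip)
print(logits.shape)  # expected (1, 400): Kinetics-400 class scores
```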

        + +

        + https://pytorchvideo.org/ +

        + +

        + Vision +

        +
        +
        + +
        +
        + +
        + Deep Learning Enables Fast and Dense Single-Molecule Localization with High Accuracy +
        +
        A. Speiser, L-R. MĂŒller, P. Hoess, U. Matti, C. J. Obara, J. H. Macke, J. Ries, S. C. Turaga
        +

        Single-molecule localization microscopy (SMLM) has had remarkable success in imaging cellular structures with nanometer resolution, but the need for activating only single isolated emitters limits imaging speed and labeling density. Here, we overcome this major limitation using deep learning. We developed DECODE, a computational tool that can localize single emitters at high density in 3D with the highest accuracy for a large range of imaging modalities and conditions. In a public software benchmark competition, it outperformed all other fitters on 12 out of 12 datasets when comparing both detection accuracy and localization error, often by a substantial margin. DECODE allowed us to take live-cell SMLM data with reduced light exposure in just 3 seconds and to image microtubules at ultra-high labeling density. Packaged for simple installation and use, DECODE will enable many labs to reduce imaging times and increase localization density in SMLM.

        + +

        + http://github.com/turagalab/decode +

        + +

        + Vision +

        +
        +
        + +
        +
        + + + +
        + + A Robust PyTorch Trainable Entry Convnet Layer in Fourier Domain + +
        +
        Abraham SĂĄnchez, Guillermo Mendoza, E. Ulises Moya-SĂĄnchez
        +

        We draw inspiration from cortical area V1 and try to mimic its main processing properties by means of quaternion local phase/orientation, which detects lines and edges in a specific direction. We analyze how the geometry of this layer makes it robust to large illumination and brightness changes.

        + +

        + https://gitlab.com/ab.sanchezperez/pytorch-monogenic +

        + +

        + Vision +

        +
        +
        + +
        +
        + + + +
        + + PyroNear: Embedded Deep Learning for Early Wildfire Detection + +
        +
        François-Guillaume Fernandez, Mateo Lostanlen, Sebastien Elmaleh, Bruno Lenzi, Felix Veith, and more than 15+ contributors
        +

        "PyroNear is non-profit organization composed solely of volunteers which was created in late 2019. Our core belief is that recent technological developments can support the cohabitation between mankind & its natural habitat. We strive towards high-performing, accessible & affordable tech-solutions for protection against natural hazards. More specifically, our first efforts are focused on wildfire protection by increasing the coverage of automatic detection systems. + +Our ongoing initiative has now gathered dozens of volunteers to put up the following main contributions: +- Computer Vision: compiling open-source models and datasets (soon to be published) for vision tasks related to wildfire detection +- Edge Computing: developing an affordable physical prototype running our PyTorch model on a Raspberry Pi +- End-to-end detection workflow: building a responsible end-to-end system for large scale detection and alert management (API, front-end monitoring platform) +- Deployment: working with French firefighter departments to gather field knowledge and conduct a test phase over the incoming European summer." +PyTorch3D is a modular and optimized library for 3D Deep Learning with PyTorch. It includes support for: data structures for heterogeneous batching of 3D data (Meshes, Point clouds and Volumes), optimized 3D operators and loss functions (with custom CUDA kernels), a modular differentiable rendering API for Meshes, Point clouds and Implicit functions, as well as several other tools for 3D Deep Learning.

        + +

        + https://github.com/pyronear +

        + +

        + Vision +

        +
        +
        + +
        +
        + + + +
        + + PyTorch3D: Fast, Flexible, 3D Deep Learning + +
        +
        Nikhila Ravi, Jeremy Reizenstein, David Novotny, Justin Johnson, Georgia Gkioxari, Roman Shapovalov, Patrick Labatut, Wan-Yen Lo
        +

        PyTorch3D is a modular and optimized library for 3D Deep Learning with PyTorch. It includes support for: data structures for heterogeneous batching of 3D data (Meshes, Point clouds and Volumes), optimized 3D operators and loss functions (with custom CUDA kernels), a modular differentiable rendering API for Meshes, Point clouds and Implicit functions, as well as several other tools for 3D Deep Learning.
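        As a brief, hedged illustration (not part of the original description), the sketch below uses the chamfer_distance loss on random point clouds to show the differentiable-operator style of the API.

```python
# Hedged sketch: a differentiable 3D loss with PyTorch3D, assuming the
# chamfer_distance op behaves as in the public docs.
import torch
from pytorch3d.loss import chamfer_distance

# Two batches of point clouds: (batch, num_points, 3)
pred_points = torch.randn(2, 1000, 3, requires_grad=True)
target_points = torch.randn(2, 1500, 3)

loss, _ = chamfer_distance(pred_points, target_points)
loss.backward()  # gradients flow back to pred_points
print(loss.item())
```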

        + +

        + https://arxiv.org/abs/2007.08501 +

        + +

        + Vision +

        +
        +
        + +
        +
        + + + +
        + + Kornia: an Open Source Differentiable Computer Vision Library for PyTorch + +
        +
        E. Riba, J. Shi, D. Mishkin, L. Ferraz, A. Nicolao
        +

        This work presents Kornia, an open-source computer vision library built upon a set of differentiable routines and modules that aims to solve generic computer vision problems. The package uses PyTorch as its main backend, not only for efficiency but also to take advantage of the reverse auto-differentiation engine to define and compute the gradient of complex functions. Inspired by OpenCV, Kornia is composed of a set of modules containing operators that can be integrated into neural networks to train models to perform a wide range of operations, including image transformations, camera calibration, epipolar geometry, and low-level image processing techniques such as filtering and edge detection, all operating directly on high-dimensional tensor representations on graphics processing units to yield faster systems. Examples of classical vision problems implemented using our framework are provided, including a benchmark comparing it to existing vision libraries.
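        A short, hedged sketch of the differentiable-operator style described above; operator names follow kornia.filters as documented and may vary between versions.

```python
# Hedged sketch: differentiable image filtering with Kornia on a batched tensor.
import torch
import kornia

img = torch.rand(1, 3, 64, 64, requires_grad=True)  # (B, C, H, W) in [0, 1]

blurred = kornia.filters.gaussian_blur2d(img, kernel_size=(5, 5), sigma=(1.5, 1.5))
edges = kornia.filters.sobel(blurred)  # edge magnitude, same shape as input

# Because every op is differentiable, gradients flow back to the input image.
edges.mean().backward()
print(img.grad.shape)
```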

        + +

        + http://www.kornia.org +

        + +

        + Vision +

        +
        +
        + +
        +
        + + + +
        + + NNGeometry: Easy and Fast Fisher Information Matrices and Neural Tangent Kernels in PyTorch + +
        +
        Thomas George
        +

        Fisher Information Matrices (FIM) and Neural Tangent Kernels (NTK) are useful tools in a number of diverse applications related to neural networks. Yet these theoretical tools are often difficult to implement using current libraries for practical-size networks, given that they require per-example gradients and a large amount of memory, since they scale with the number of parameters (for the FIM) or the number of examples × the cardinality of the output space (for the NTK). NNGeometry is a PyTorch library that offers a high-level API for computing various linear algebra operations such as matrix-vector products, trace, Frobenius norm, and so on, where the matrix is either the FIM or the NTK, leveraging recent advances in approximating these matrices.

        + +

        + https://github.com/tfjgeorge/nngeometry/ +

        + +

        + Vision +

        +
        +
        + +
        +
        + + + +
        + + CompressAI: a research library and evaluation platform for end-to-end compression + +
        +
        Bégaint J., Racapé F., Feltman S., Pushparaja A.
        +

        CompressAI is a PyTorch library that provides custom operations, layers, modules and tools to research, develop and evaluate end-to-end image and video compression codecs. In particular, CompressAI includes pre-trained models and evaluation tools to compare learned methods with traditional codecs. State-of-the-art end-to-end compression models have been reimplemented in PyTorch and trained from scratch, reproducing published results and allowing further research in the domain.
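        As a hedged illustration (not from the original abstract), the snippet below sketches running a pretrained codec from the CompressAI model zoo; the zoo entry name, output keys, and bit-rate estimate are assumptions based on the public documentation.

```python
# Hedged sketch: running a pretrained learned image codec from the CompressAI
# model zoo; names and output keys may differ between releases.
import torch
from compressai.zoo import bmshj2018_factorized

net = bmshj2018_factorized(quality=2, pretrained=True).eval()

x = torch.rand(1, 3, 256, 256)  # dummy RGB image in [0, 1]
with torch.no_grad():
    out = net(x)

x_hat = out["x_hat"]                  # reconstructed image
likelihoods = out["likelihoods"]["y"] # latent likelihoods
bpp = -likelihoods.log2().sum() / (256 * 256)  # rough estimate of bits per pixel
print(x_hat.shape, float(bpp))
```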

        + +

        + +

        + +

        + Vision +

        +
        +
        + +
        +
        + + + +
        + + pystiche: A Framework for Neural Style Transfer + +
        +
        Philip Meier, Volker Lohweg
        +

        The seminal work of Gatys, Ecker, and Bethge gave birth to the field of _Neural Style Transfer_ (NST) in 2016. An NST describes the merger of the content and artistic style of two arbitrary images. This idea is nothing new in the field of non-photorealistic rendering (NPR). What distinguishes NST from traditional NPR approaches is its generality: an NST only needs a single arbitrary content and style image as input and thus "makes -- for the first time -- a generalized style transfer practicable". Besides peripheral tasks, an NST at its core is the definition of an optimization criterion called the _perceptual loss_, which estimates the perceptual quality of the stylized image. Usually the perceptual loss comprises a deep neural network that needs to supply encodings of images from various depths. + +`pystiche` is a library for NST written in Python and built upon PyTorch. It provides modular and efficient implementations of commonly used perceptual losses as well as neural network architectures. This enables users to mix current state-of-the-art techniques with new ideas with ease. This poster will showcase the core concepts of `pystiche` that will enable other researchers as well as laypersons to get an NST running in minutes.

        + +

        + https://github.com/pmeier/pystiche +

        + +

        + Vision +

        +
        +
        + +
        +
        + + + +
        + + GaNDLF – A Generally Nuanced Deep Learning Framework for Clinical Imaging Workflows + +
        +
        Siddhish Thakur
        +

        Deep Learning (DL) has greatly highlighted the potential impact of optimized machine learning in both the scientific +and clinical communities. The advent of open-source DL libraries from major industrial entities, such as TensorFlow +(Google) and PyTorch (Facebook), further contributes to DL's promise of democratizing computational analytics. However, an increased technical and specialized background is required to develop DL algorithms, and the variability of implementation details hinders their reproducibility. Towards lowering the barrier and making the mechanism of DL development, training, and inference more stable, reproducible, and scalable, without requiring an extensive technical background, this manuscript proposes the Generally Nuanced Deep Learning Framework (GaNDLF). With built-in support for k-fold cross-validation, data augmentation, multiple modalities and output classes, and multi-GPU training, as well as the ability to work with both radiographic and histologic imaging, GaNDLF aims to provide an end-to-end solution for all DL-related tasks, tackle problems in medical imaging, and provide a robust application framework for deployment in clinical workflows. + +Keywords: Deep Learning, Framework, Segmentation, Regression, Classification, Cross-validation, Data +augmentation, Deployment, Clinical, Workflows

        + +

        + +

        + +

        + Vision +

        +
        +
        + + + + + + + + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/pykale/index.html b/ecosystem/pykale/index.html new file mode 100644 index 000000000000..b1973688354a --- /dev/null +++ b/ecosystem/pykale/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + PyKale | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        PyKale

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        PyKale is a PyTorch library for multimodal learning and transfer learning with deep learning and dimensionality reduction on graphs, images, texts, and videos.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/pypose/index.html b/ecosystem/pypose/index.html new file mode 100644 index 000000000000..c89c50b1df46 --- /dev/null +++ b/ecosystem/pypose/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + PyPose | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        PyPose

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        PyPose is a robotics-oriented, PyTorch-based library that combines deep perceptual models with physics-based optimization techniques, so that users can focus on their novel applications.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/pypots/index.html b/ecosystem/pypots/index.html new file mode 100644 index 000000000000..41af93962b13 --- /dev/null +++ b/ecosystem/pypots/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + PyPOTS | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        PyPOTS

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        A Python toolbox for data mining on Partially-Observed Time Series (POTS) that helps engineers focus more on the core problems in their data rather than on the missing parts.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/pyro/index.html b/ecosystem/pyro/index.html new file mode 100644 index 000000000000..9809a3f87567 --- /dev/null +++ b/ecosystem/pyro/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/pystiche/index.html b/ecosystem/pystiche/index.html new file mode 100644 index 000000000000..c2460cf51e21 --- /dev/null +++ b/ecosystem/pystiche/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + pystiche | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        pystiche

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        pystiche is a framework for Neural Style Transfer (NST) built upon PyTorch.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/pysyft/index.html b/ecosystem/pysyft/index.html new file mode 100644 index 000000000000..692c2a0668b5 --- /dev/null +++ b/ecosystem/pysyft/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/pytorch-geometric/index.html b/ecosystem/pytorch-geometric/index.html new file mode 100644 index 000000000000..ae6384b33560 --- /dev/null +++ b/ecosystem/pytorch-geometric/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/pytorch-lightning/index.html b/ecosystem/pytorch-lightning/index.html new file mode 100644 index 000000000000..81a3ef46c041 --- /dev/null +++ b/ecosystem/pytorch-lightning/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/pytorch-metric-learning/index.html b/ecosystem/pytorch-metric-learning/index.html new file mode 100644 index 000000000000..5441002e7578 --- /dev/null +++ b/ecosystem/pytorch-metric-learning/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + PyTorch Metric Learning | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        PyTorch Metric Learning

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        The easiest way to use deep metric learning in your application. Modular, flexible, and extensible.
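        As a hedged illustration of the library's loss-function API (not part of the original description), assuming the TripletMarginLoss interface documented by the project:

```python
# Hedged sketch: using a metric-learning loss from pytorch-metric-learning
# inside an ordinary PyTorch training step.
import torch
from pytorch_metric_learning import losses

loss_func = losses.TripletMarginLoss(margin=0.2)

embeddings = torch.randn(32, 128, requires_grad=True)  # model outputs
labels = torch.randint(0, 10, (32,))                   # class labels

# The library forms valid triplets from the batch internally.
loss = loss_func(embeddings, labels)
loss.backward()
print(loss.item())
```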

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/pytorch-nlp/index.html b/ecosystem/pytorch-nlp/index.html new file mode 100644 index 000000000000..00516eb82c1a --- /dev/null +++ b/ecosystem/pytorch-nlp/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + PyTorch-NLP | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        PyTorch-NLP

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        Basic Utilities for PyTorch Natural Language Processing (NLP).

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/pytorch3d/index.html b/ecosystem/pytorch3d/index.html new file mode 100644 index 000000000000..b48705180e0a --- /dev/null +++ b/ecosystem/pytorch3d/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + PyTorch3D | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        PyTorch3D

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        PyTorch3D provides efficient, reusable components for 3D Computer Vision research with PyTorch.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/pytorch_geometric_temporal/index.html b/ecosystem/pytorch_geometric_temporal/index.html new file mode 100644 index 000000000000..6812a989bf37 --- /dev/null +++ b/ecosystem/pytorch_geometric_temporal/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + PyTorch Geometric Temporal | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        PyTorch Geometric Temporal

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        PyTorch Geometric Temporal is a temporal (dynamic) extension library for PyTorch Geometric.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/pytorchfi/index.html b/ecosystem/pytorchfi/index.html new file mode 100644 index 000000000000..66cb05c951a2 --- /dev/null +++ b/ecosystem/pytorchfi/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + pytorchfi | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        pytorchfi

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        A runtime fault injection tool for PyTorch.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/pytorchvideo/index.html b/ecosystem/pytorchvideo/index.html new file mode 100644 index 000000000000..55ad545b0fc2 --- /dev/null +++ b/ecosystem/pytorchvideo/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + PyTorchVideo | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        PyTorchVideo

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        A deep learning library for video understanding research. Hosts various video-focused models, datasets, training pipelines and more.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/rastervision/index.html b/ecosystem/rastervision/index.html new file mode 100644 index 000000000000..465e8a3923b5 --- /dev/null +++ b/ecosystem/rastervision/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + raster-vision | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        raster-vision

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        An open source framework for deep learning on satellite and aerial imagery.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/ray/index.html b/ecosystem/ray/index.html new file mode 100644 index 000000000000..65a7490cd6e7 --- /dev/null +++ b/ecosystem/ray/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + Ray | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        Ray

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        Ray is a fast and simple framework for building and running distributed applications.
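        A minimal, hedged sketch of Ray's task API for context (not part of the original description):

```python
# Hedged sketch: parallelizing a simple function with Ray's task API.
import ray

ray.init()  # start a local Ray runtime

@ray.remote
def square(x):
    return x * x

# Launch tasks in parallel and gather the results.
futures = [square.remote(i) for i in range(8)]
print(ray.get(futures))  # [0, 1, 4, 9, 16, 25, 36, 49]
ray.shutdown()
```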

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/renate/index.html b/ecosystem/renate/index.html new file mode 100644 index 000000000000..5b3cd77e9a21 --- /dev/null +++ b/ecosystem/renate/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + Renate | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        Renate

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        Renate is a library providing tools for re-training PyTorch models over time as new data becomes available.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/roma/index.html b/ecosystem/roma/index.html new file mode 100644 index 000000000000..1378cd840643 --- /dev/null +++ b/ecosystem/roma/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/simulai/index.html b/ecosystem/simulai/index.html new file mode 100644 index 000000000000..931d9ff049cc --- /dev/null +++ b/ecosystem/simulai/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + SimulAI | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        SimulAI

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        SimulAI is a toolkit with pipelines for physics-informed machine learning.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/skorch/index.html b/ecosystem/skorch/index.html new file mode 100644 index 000000000000..1ff6b699f373 --- /dev/null +++ b/ecosystem/skorch/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/stable-baselines3/index.html b/ecosystem/stable-baselines3/index.html new file mode 100644 index 000000000000..cf16767f8409 --- /dev/null +++ b/ecosystem/stable-baselines3/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + Stable Baselines3 | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        Stable Baselines3

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        Stable Baselines3 (SB3) is a set of reliable implementations of reinforcement learning algorithms in PyTorch.
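        For context (not part of the original description), a minimal hedged sketch of training an agent with SB3:

```python
# Hedged sketch: training a PPO agent on a Gym environment with Stable Baselines3.
from stable_baselines3 import PPO

model = PPO("MlpPolicy", "CartPole-v1", verbose=0)
model.learn(total_timesteps=10_000)

# Roll out the trained policy for a few steps in the vectorized training env.
env = model.get_env()
obs = env.reset()
for _ in range(10):
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
```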

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/stoke/index.html b/ecosystem/stoke/index.html new file mode 100644 index 000000000000..79f3f5e4654a --- /dev/null +++ b/ecosystem/stoke/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + stoke | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        stoke

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        A lightweight declarative PyTorch wrapper for context switching between devices, distributed modes, mixed-precision, and PyTorch extensions.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/substra/index.html b/ecosystem/substra/index.html new file mode 100644 index 000000000000..0d3509a2392d --- /dev/null +++ b/ecosystem/substra/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + Substra | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        Substra

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        Substra is a federated learning Python library to run federated learning experiments at scale on real distributed data.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/tensorly/index.html b/ecosystem/tensorly/index.html new file mode 100644 index 000000000000..4dc941d07329 --- /dev/null +++ b/ecosystem/tensorly/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

        Redirecting…

        + Click here if you are not redirected. + diff --git a/ecosystem/textbrewer/index.html b/ecosystem/textbrewer/index.html new file mode 100644 index 000000000000..666f3237c607 --- /dev/null +++ b/ecosystem/textbrewer/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + TextBrewer | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        TextBrewer

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        A PyTorch-based knowledge distillation toolkit for natural language processing

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/tiatoolbox/index.html b/ecosystem/tiatoolbox/index.html new file mode 100644 index 000000000000..663ceb5f03e2 --- /dev/null +++ b/ecosystem/tiatoolbox/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + TIAToolbox | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        TIAToolbox

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        TIAToolbox provides an easy-to-use API where researchers can use, adapt, and create models for computational pathology (CPath).

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/torchdistill/index.html b/ecosystem/torchdistill/index.html new file mode 100644 index 000000000000..a7bb51b3b26d --- /dev/null +++ b/ecosystem/torchdistill/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + torchdistill | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        torchdistill

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        torchdistill is a coding-free framework built on PyTorch for reproducible deep learning and knowledge distillation studies.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/torchdrift/index.html b/ecosystem/torchdrift/index.html new file mode 100644 index 000000000000..47b572e86fad --- /dev/null +++ b/ecosystem/torchdrift/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + TorchDrift | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        TorchDrift

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        TorchDrift is a data and concept drift library for PyTorch. It lets you monitor your PyTorch models to see if they operate within spec.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/torchdrug/index.html b/ecosystem/torchdrug/index.html new file mode 100644 index 000000000000..f42cab7201c2 --- /dev/null +++ b/ecosystem/torchdrug/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + torchdrug | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        torchdrug

        + + + + + Group 5 + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + +

        A powerful and flexible machine learning platform for drug discovery.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/torchgeo/index.html b/ecosystem/torchgeo/index.html new file mode 100644 index 000000000000..868cc6c3cb51 --- /dev/null +++ b/ecosystem/torchgeo/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + torchgeo | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        torchgeo


        Datasets, transforms, and models for geospatial data

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/torchio/index.html b/ecosystem/torchio/index.html new file mode 100644 index 000000000000..cc59735c6763 --- /dev/null +++ b/ecosystem/torchio/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + TorchIO | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        TorchIO


        TorchIO is a set of tools to efficiently read, preprocess, sample, augment, and write 3D medical images in deep learning applications written in PyTorch.
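A minimal sketch of that preprocessing and augmentation flow, assuming a synthetic 4D tensor in place of a real medical image volume:

import torch
import torchio as tio

# Wrap a (channels, x, y, z) volume as a subject and apply random spatial and intensity augmentations.
subject = tio.Subject(t1=tio.ScalarImage(tensor=torch.rand(1, 64, 64, 64)))
transform = tio.Compose([tio.RandomAffine(), tio.RandomNoise()])
augmented = transform(subject)
print(augmented.t1.data.shape)  # still (1, 64, 64, 64), now augmented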

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/torchmetrics/index.html b/ecosystem/torchmetrics/index.html new file mode 100644 index 000000000000..31dcbe7d7cd2 --- /dev/null +++ b/ecosystem/torchmetrics/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + TorchMetrics | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        TorchMetrics


        Machine learning metrics for distributed, scalable PyTorch applications.
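A minimal sketch of the metric API, assuming a recent torchmetrics release where Accuracy takes a task argument:

import torch
from torchmetrics import Accuracy

# Accumulate a classification metric over (possibly distributed) batches, then compute the aggregate.
metric = Accuracy(task="multiclass", num_classes=3, average="micro")
preds = torch.tensor([0, 2, 1, 2])
target = torch.tensor([0, 1, 1, 2])
metric.update(preds, target)
print(metric.compute())  # 0.75 with micro averaging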

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/torchopt/index.html b/ecosystem/torchopt/index.html new file mode 100644 index 000000000000..0b55ee5a4e1a --- /dev/null +++ b/ecosystem/torchopt/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + TorchOpt | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        TorchOpt


        TorchOpt is a PyTorch-based library for efficient differentiable optimization.
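To illustrate what differentiable optimization means, here is a minimal plain-PyTorch sketch (not the TorchOpt API): one unrolled gradient step is kept on the autograd graph so an outer loss can be differentiated through it.

import torch

w = torch.tensor([1.0, -2.0], requires_grad=True)  # meta-parameter we differentiate through
x = torch.tensor([0.5, 0.5], requires_grad=True)   # inner parameter

inner_loss = ((x - w) ** 2).sum()
(g,) = torch.autograd.grad(inner_loss, x, create_graph=True)  # keep the graph for the outer gradient
x_updated = x - 0.1 * g                                       # one unrolled SGD step

outer_loss = (x_updated ** 2).sum()
outer_loss.backward()   # gradients flow through the inner update
print(w.grad)           # non-zero: the outer loss "sees" the optimization step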

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/torchpoints3d/index.html b/ecosystem/torchpoints3d/index.html new file mode 100644 index 000000000000..cd6f590fe6ad --- /dev/null +++ b/ecosystem/torchpoints3d/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + PyTorch-Points3d | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        PyTorch-Points3d


        A PyTorch framework for deep learning on point clouds.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/torchquantum/index.html b/ecosystem/torchquantum/index.html new file mode 100644 index 000000000000..fc6e1d61b9e9 --- /dev/null +++ b/ecosystem/torchquantum/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + TorchQuantum | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        TorchQuantum


TorchQuantum is a quantum-classical simulation framework based on PyTorch. It supports statevector, density matrix, and pulse simulation on different hardware platforms such as CPUs and GPUs.
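As a minimal illustration of what statevector simulation means (plain PyTorch tensors, not the TorchQuantum API), the sketch below applies a Hadamard gate to a one-qubit state and reads off the measurement probabilities.

import torch

# |0> state and a Hadamard gate as complex tensors.
state = torch.tensor([1.0, 0.0], dtype=torch.complex64)
hadamard = torch.tensor([[1.0, 1.0], [1.0, -1.0]], dtype=torch.complex64) / (2 ** 0.5)

state = hadamard @ state   # apply the gate to the statevector
probs = state.abs() ** 2   # Born-rule measurement probabilities
print(probs)               # tensor([0.5000, 0.5000])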

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/trains/index.html b/ecosystem/trains/index.html new file mode 100644 index 000000000000..5f882a32c851 --- /dev/null +++ b/ecosystem/trains/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + Clear ML | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        Clear ML


ClearML is a full-system ML/DL experiment manager, versioning, and MLOps solution.
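A minimal sketch of how a PyTorch script is registered with ClearML; the project and task names are placeholders.

from clearml import Task

# One call registers the run; ClearML then auto-logs console output, scalars, and model checkpoints.
task = Task.init(project_name="examples", task_name="pytorch-training-run")
task.connect({"lr": 1e-3, "batch_size": 32})  # record hyperparameters alongside the run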

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/transformers/index.html b/ecosystem/transformers/index.html new file mode 100644 index 000000000000..2b5d4b310c05 --- /dev/null +++ b/ecosystem/transformers/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + Transformers | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        Transformers


        State-of-the-art Natural Language Processing for PyTorch.
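A minimal sketch using the pipeline API, which runs on PyTorch under the hood; the default sentiment model is downloaded on first use.

from transformers import pipeline

# A ready-made text-classification pipeline backed by a pretrained PyTorch model.
classifier = pipeline("sentiment-analysis")
print(classifier("PyTorch makes building models enjoyable."))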

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/trtorch/index.html b/ecosystem/trtorch/index.html new file mode 100644 index 000000000000..0a6bf046f2ce --- /dev/null +++ b/ecosystem/trtorch/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + Torch-TensorRT | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        Torch-TensorRT


        PyTorch/TorchScript compiler for NVIDIA GPUs using TensorRT
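A hedged sketch of compiling a torchvision model with Torch-TensorRT (requires an NVIDIA GPU with TensorRT installed; the static input shape and FP32 precision are choices of this example):

import torch
import torch_tensorrt
import torchvision.models as models

model = models.resnet18(weights=None).eval().cuda()
trt_model = torch_tensorrt.compile(
    model,
    inputs=[torch_tensorrt.Input((1, 3, 224, 224))],  # static shape for this sketch
    enabled_precisions={torch.float},                 # FP16/INT8 are common alternatives
)
print(trt_model(torch.randn(1, 3, 224, 224).cuda()).shape)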

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/usb/index.html b/ecosystem/usb/index.html new file mode 100644 index 000000000000..d0f8df10453e --- /dev/null +++ b/ecosystem/usb/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + USB | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        USB


USB is a PyTorch-based Python package for Semi-Supervised Learning (SSL). It is easy to use and extend, affordable for small groups, and comprehensive for developing and evaluating SSL algorithms.

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/vissl/index.html b/ecosystem/vissl/index.html new file mode 100644 index 000000000000..79d727c7c317 --- /dev/null +++ b/ecosystem/vissl/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + VISSL | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        VISSL


        A library for state-of-the-art self-supervised learning

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ecosystem/vllm/index.html b/ecosystem/vllm/index.html new file mode 100644 index 000000000000..95b2f75e90d7 --- /dev/null +++ b/ecosystem/vllm/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + + + vllm | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + +
        +
        +

        vllm


vLLM is a high-throughput and memory-efficient inference and serving engine for LLMs.
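A minimal sketch following the vLLM quickstart; the small OPT checkpoint is just an example model.

from vllm import LLM, SamplingParams

llm = LLM(model="facebook/opt-125m")
params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=32)
outputs = llm.generate(["The PyTorch ecosystem includes"], params)
print(outputs[0].outputs[0].text)  # generated continuation for the prompt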

        + + + + +
        +
        + +
        +
        +
        +
        + +
        +
        + +
        + +
        +
        +
        + +
        +
        +
        +
        +

        Docs

        +

        Access comprehensive developer documentation for PyTorch

        + View Docs +
        + +
        +

        Tutorials

        +

        Get in-depth tutorials for beginners and advanced developers

        + View Tutorials +
        + +
        +

        Resources

        +

        Find development resources and get your questions answered

        + View Resources +
        +
        +
        +
        + +
        + +
        + +
        +
        +
        +
        + + +
        +
        +
        + + +
        + + + + + + + + + + + + + + + + + + + + + + + diff --git a/feed.xml b/feed.xml index fb5fff5e4663..481f4e114439 100644 --- a/feed.xml +++ b/feed.xml @@ -1,99 +1,35 @@ ---- -layout: null -collection: posts ---- - -{% if page.xsl %} - -{% endif %} - - Jekyll - - - {{ site.time | date_to_xmlschema }} - {{ page.url | absolute_url | xml_escape }} - {% assign title = site.title | default: site.name %} - {% if page.collection != "posts" %} - {% assign collection = page.collection | capitalize %} - {% assign title = title | append: " | " | append: collection %} - {% endif %} - {% if page.category %} - {% assign category = page.category | capitalize %} - {% assign title = title | append: " | " | append: category %} - {% endif %} + + Jekyll + + + 2025-08-22T12:36:04-07:00 + https://pytorch.org/feed.xml + + + + - {% if title %} - {{ title | smartify | xml_escape }} - {% endif %} + + PyTorch Website + - {% if site.description %} - {{ site.description | xml_escape }} - {% endif %} + + Scientific Computing... + - {% if site.author %} + - {{ site.author.name | default: site.author | xml_escape }} - {% if site.author.email %} - {{ site.author.email | xml_escape }} - {% endif %} - {% if site.author.uri %} - {{ site.author.uri | xml_escape }} - {% endif %} + Facebook + + - {% endif %} - - {% assign posts = site[page.collection] | where_exp: "post", "post.draft != true" | sort: "date" | reverse %} - {% if page.category %} - {% assign posts = posts | where: "category",page.category %} - {% endif %} - {% for post in posts limit: 10 %} - - {{ post.title | smartify | strip_html | normalize_whitespace | xml_escape }} - - {{ post.date | date_to_xmlschema }} - {{ post.last_modified_at | default: post.date | date_to_xmlschema }} - {{ post.id | absolute_url | xml_escape }} - {{ post.content | strip | xml_escape }} - - {% assign post_author = post.author | default: post.authors[0] | default: site.author %} - {% assign post_author = site.data.authors[post_author] | default: post_author %} - {% assign post_author_email = post_author.email | default: nil %} - {% assign post_author_uri = post_author.uri | default: nil %} - {% assign post_author_name = post_author.name | default: post_author %} - - - {{ post_author_name | default: "" | xml_escape }} - {% if post_author_email %} - {{ post_author_email | xml_escape }} - {% endif %} - {% if post_author_uri %} - {{ post_author_uri | xml_escape }} - {% endif %} - - - {% if post.category %} - - {% endif %} - - {% for tag in post.tags %} - - {% endfor %} - - {% if post.excerpt and post.excerpt != empty %} - {{ post.excerpt | strip_html | normalize_whitespace | xml_escape }} - {% endif %} + - {% assign post_image = post.image.path | default: post.image %} - {% if post_image %} - {% unless post_image contains "://" %} - {% assign post_image = post_image | absolute_url %} - {% endunless %} - - {% endif %} - - {% endfor %} + + + diff --git a/get-started/cloud-partners/index.html b/get-started/cloud-partners/index.html new file mode 100644 index 000000000000..09f622a31d76 --- /dev/null +++ b/get-started/cloud-partners/index.html @@ -0,0 +1,699 @@ + + + + + + + + + + + + + Start via Cloud Partners | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
        +
        + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
        +
        +
        +
        + + +
        + + + + + + + + +
        + +
        +
        + + +
        + + + +
        +
        +

        Get Started

        + +

        + Select preferences and run the command to install PyTorch locally, or + get started quickly with one of the supported cloud platforms. +

        +
        +
        + +
        +
        + + +
        +
        +
        +
        +
          +
          + +
          +
          +
          +
          +

          Start via Cloud Partners

          + +
          +
          +

          Cloud platforms provide powerful hardware and infrastructure for training and deploying deep learning models. Select a cloud platform below to get started with PyTorch.

          +
          + + +
          +
          +
          + Google Cloud Platform +
          + + + + + +
          +
          + +
          +
          +
          +

          Microsoft Azure

          +
          + + +
          +
          + +
          +
          +
          + Lightning Studios +
          + +
          +
          +
          + +
          +
          + +
          + +
          +
          +

          Using PyTorch with AWS

          + +

To gain the full experience of what PyTorch has to offer, a machine with at least one dedicated NVIDIA GPU is necessary. While it is not always practical to have your own machine with these specifications, there are cloud-based solutions that allow you to test and use PyTorch’s full features.

          + +

          AWS provides both:

          + +
            +
          • Deep Learning AMIs: dedicated, pre-built machine learning instances, complete with PyTorch
          • +
          • Deep Learning Base AMI: bare Linux and Windows instances for you to do a custom install of PyTorch.
          • +
          + +

          Quick Start on Deep Learning AMI

          + +

If you want to get started with a Linux AWS instance that has PyTorch already installed and that you can log into from the command line, this step-by-step guide will help you do that.

          + +
            +
          1. Sign into your AWS console. If you do not have an AWS account, see the primer below.
          2. +
          3. Click on Launch a virtual machine.
          4. +
          5. Select Deep Learning AMI (Ubuntu). +
            +

            This gives you an instance with a pre-defined version of PyTorch already installed. If you wanted a bare AWS instance that required PyTorch to be installed, you could choose the Deep Learning Base AMI (Ubuntu), which will have the hardware, but none of the software already available.

            +
            +
          6. +
          7. Choose a GPU compute p3.2xlarge instance type. +
            +

            You can choose any of the available instances to try PyTorch, even the free-tier, but it is recommended for best performance that you get a GPU compute or Compute optimized instance. Other instance options include the Compute Optimized c5-series (e.g., c5.2xlarge) or the General Compute t2-series or t3-series (e.g., t2.2xlarge). It is important to note that if you choose an instance without a GPU, PyTorch will only be running in CPU compute mode, and operations may take much, much longer.

            +
            +
          8. +
          9. Click on Review and Launch.
          10. +
          11. Review the instance information and click Launch.
          12. +
13. You will want to Create a new key pair if you do not already have one. Pick a name and download it locally via the Download Key Pair button.
          14. +
          15. Now click on Launch Instances. You now have a live instance to use for PyTorch. If you click on View Instances, you will see your running instance.
          16. +
          17. Take note of the Public DNS as this will be used to ssh into your instance from the command-line.
          18. +
          19. Open a command-line prompt
          20. +
          21. Ensure that your key-pair has the proper permissions, or you will not be able to log in. Type chmod 400 path/to/downloaded/key-pair.pem.
          22. +
          23. Type ssh -i path/to/downloaded/key-pair.pem ubuntu@<Public DNS that you noted above>. e.g., ssh -i ~/Downloads/aws-quick-start.pem ubuntu@ec2-55-181-112-129.us-west-2.compute.amazonaws.com. If asked to continue connection, type yes.
          24. +
          25. You should now see a prompt similar to ubuntu@ip-100-30-20-95. If so, you are now connected to your instance.
          26. +
          27. Verify that PyTorch is installed by running the verification steps below. +
            +

            If you chose the Deep Learning Base AMI (Ubuntu) instead of the Deep Learning AMI (Ubuntu), then you will need to install PyTorch. Follow the Linux getting started instructions in order to install it.

            +
            +
          28. +
          + +

          Quick Start Verification

          + +

          To ensure that PyTorch was installed correctly, we can verify the installation by running sample PyTorch code. Here we will construct a randomly initialized tensor.

          + +
          import torch
          +x = torch.rand(5, 3)
          +print(x)
          +
          + +

          The output should be something similar to:

          + +
          tensor([[0.3380, 0.3845, 0.3217],
          +        [0.8337, 0.9050, 0.2650],
          +        [0.2979, 0.7141, 0.9069],
          +        [0.1449, 0.1132, 0.1375],
          +        [0.4675, 0.3947, 0.1426]])
          +
          + +

Additionally, to check whether your GPU driver and CUDA are enabled and accessible by PyTorch, run the following command, which returns whether the CUDA driver is enabled:

          + +
          import torch
          +torch.cuda.is_available()
          +
          + +
          + Show Demo +
          + + Hide Demo +
          +
          + +

          AWS Primer

          + +

Generally, you will be using Amazon Elastic Compute Cloud (or EC2) to spin up your instances. Amazon has various instance types, each of which is configured for a specific use case. For PyTorch, it is highly recommended that you use the accelerated computing instances that feature GPUs or custom AI/ML accelerators, as they are tailored for the high compute needs of machine learning.

          + +

          In order to use AWS, you need to set up an AWS account, if you do not have one already. You will create a username (your email address), password and an AWS account name (since you can create multiple AWS accounts for different purposes). You will also provide contact and billing information. The billing information is important because while AWS does provide what they call “free-tier” instances, to use PyTorch you will want more powerful, paid instances.

          + +

          Once you are logged in, you will be brought to your AWS console. You can even learn more about AWS through a set of simple tutorials.

          + +

          AWS Inferentia-based instances

          + +

          AWS Inferentia is a chip custom built by AWS to provide higher performance and low cost machine learning inference in the cloud. Amazon EC2 Inf1 instances feature up to 16 AWS Inferentia chips, the latest second generation Intel Xeon Scalable processors, and up to 100 Gbps networking to enable high throughput and lowest cost inference in the cloud. You can use Inf1 instances with Amazon SageMaker for a fully managed workflow, or use the AWS Neuron SDK directly which is integrated with PyTorch.

          + +

          GPU-based instances

          + +

Amazon EC2 P4d instances deliver the highest performance for machine learning training on AWS. They are powered by the latest NVIDIA A100 Tensor Core GPUs and are the first in the cloud to feature 400 Gbps instance networking. P4d instances are deployed in hyperscale clusters called EC2 UltraClusters, which are composed of more than 4,000 NVIDIA A100 GPUs, petabit-scale non-blocking networking, and scalable low-latency storage with FSx for Lustre. Each EC2 UltraCluster provides supercomputer-class performance to enable you to solve the most complex multi-node ML training tasks.

          + +

          For ML inference, AWS Inferentia-based Inf1 instances provide the lowest cost inference in the cloud. Additionally, Amazon EC2 G4dn instances featuring NVIDIA T4 GPUs are optimized for GPU-based machine learning inference and small scale training that leverage NVIDIA libraries.

          + +

          Creating and Launching an Instance

          + +

          Once you decided upon your instance type, you will need to create, optionally configure and launch your instance. You can connect to your instance from the web browser or a command-line interface. Here are guides for instance launch for various platforms:

          + + + +

          Amazon SageMaker

          + +

With the SageMaker service, AWS provides a fully managed offering that allows developers and data scientists to build, train, and deploy machine learning models.

          + +

          See AWS documentation to learn how to configure Amazon SageMaker with PyTorch.
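As a hedged sketch of what that configuration can look like with the SageMaker Python SDK, the snippet below launches a training job; the entry point, role ARN, S3 path, instance type, and version strings are placeholders to adapt to your account.

from sagemaker.pytorch import PyTorch

estimator = PyTorch(
    entry_point="train.py",                               # your training script
    role="arn:aws:iam::123456789012:role/SageMakerRole",  # placeholder execution role
    instance_count=1,
    instance_type="ml.p3.2xlarge",
    framework_version="2.1",
    py_version="py310",
)
estimator.fit({"training": "s3://my-bucket/train-data"})  # placeholder S3 input channel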

          + +

          Pre-Built AMIs

          + +

          AWS provides instances (called AWS Deep Learning AMIs) pre-built with a modern version of PyTorch. The available AMIs are:

          + +
            +
          • Ubuntu
          • +
          • Amazon Linux
          • +
          • Windows 2016
          • +
          + +

          Amazon has written a good blog post on getting started with pre-built AMI.

          + +

          Installing PyTorch From Scratch

          + +

          You may prefer to start with a bare instance to install PyTorch. Once you have connected to your instance, setting up PyTorch is the same as setting up locally for your operating system of choice.

          + +
          +
          +

          Using PyTorch with Google Cloud

          + +

To gain the full experience of what PyTorch has to offer, a machine with at least one dedicated NVIDIA GPU is necessary. While it is not always practical to have your own machine with these specifications, there are cloud-based solutions that allow you to test and use PyTorch’s full features.

          + +

          Google Cloud provides both:

          + + + +

          Google Cloud Primer

          + +

In order to use Google Cloud, you need to set up a Google account, if you do not have one already. You will create a username (typically an @gmail.com email address) and password. Afterwards, you will be able to try Google Cloud. You will also provide contact and billing information. The billing information is initially used to prove you are a real person, and after your trial you can choose to upgrade to a paid account.

          + +

          Once you are logged in, you will be brought to your Google Cloud console. You can even learn more about Google Cloud through a set of simple tutorials.

          + +

          Cloud Deep Learning VM Image

          + +

Google Cloud provides pre-configured virtual machines that require no setup to help you build your deep learning projects. Cloud Deep Learning VM Image is a set of Debian-based virtual machines that allow you to build and run PyTorch-based machine learning applications.

          + +

          GPU-based Virtual Machines

          + +

For custom virtual machines, generally you will want to use Compute Engine Virtual Machine instances, with GPU enabled, to build with PyTorch. Google has various virtual machine types and pricing options, with both Linux and Windows, all of which can be configured for specific use cases. For PyTorch, it is highly recommended that you use a GPU-enabled virtual machine, as these are tailored for the high compute needs of machine learning.

          + +

          The expense of your virtual machine is directly correlated to the number of GPUs that it contains. One NVIDIA Tesla P100 virtual machine, for example, can actually be suitable for many use cases.

          + +

          Deep Learning Containers

          + +

          Google Cloud also offers pre-configured and optimized Deep Learning Containers. They provide a consistent environment across Google Cloud services, making it easy to scale in the cloud or shift from on-premises. You have the flexibility to deploy on Google Kubernetes Engine (GKE), AI Platform, Cloud Run, Compute Engine, Kubernetes, and Docker Swarm.

          + +

          Installing PyTorch From Scratch

          + +

          You may prefer to start with a bare instance to install PyTorch. Once you have connected to your instance, setting up PyTorch is the same as setting up locally for your operating system of choice.

          + +
          +
          +

          Using PyTorch with Azure

          + +

To gain the full experience of what PyTorch has to offer, a machine with at least one dedicated NVIDIA GPU is necessary. While it is not always practical to have your own machine with these specifications, there are cloud-based solutions that allow you to test and use PyTorch’s full features.

          + +

          Azure provides:

          + + + +

          Azure Primer

          + +

          In order to use Azure, you need to set up an Azure account, if you do not have one already. You will use a Microsoft-recognized email address and password. You will also verify your identity by providing contact and billing information. The billing information is necessary because while Azure does provide free usage credits and free services, you may need or want higher-end services as well.

          + +

          Once you are logged in, you will be brought to your Azure portal. You can even learn more about Azure through a set of simple video tutorials.

          + +

          Azure Machine Learning Service

          + +

          The Azure Machine Learning service is a cloud-based service you can use to accelerate your end-to-end machine learning workflows, from training to production. Azure Machine Learning allows you to easily move from training PyTorch models on your local machine to scaling out to the cloud. Using Azure ML’s CLI or Python SDK, you can leverage the service’s advanced functionality for distributed training, hyperparameter tuning, run history tracking, and production-scale model deployments.

          + +

          See the documentation to learn how to use PyTorch with Azure Machine Learning.
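As a hedged sketch using the Azure ML Python SDK v2, the snippet below submits a PyTorch training script as a command job; the workspace coordinates, compute name, and environment string are placeholders and assumptions of this example.

from azure.ai.ml import MLClient, command
from azure.identity import DefaultAzureCredential

# Placeholder workspace coordinates; replace with your subscription, resource group, and workspace.
ml_client = MLClient(DefaultAzureCredential(), "<subscription-id>", "<resource-group>", "<workspace-name>")

job = command(
    code="./src",                                 # folder containing train.py
    command="python train.py --epochs 5",
    environment="<curated-pytorch-environment>",  # e.g. an Azure ML curated PyTorch environment
    compute="<gpu-cluster-name>",
)
ml_client.jobs.create_or_update(job)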

          + +

          Pre-Configured Data Science Virtual Machines

          + +

Azure provides pre-configured data science and machine learning virtual machines. PyTorch is available on many of these; for example, here is the documentation for how to set up an Azure virtual machine on Ubuntu Linux.

          + +

          GPU-based Virtual Machines

          + +

Microsoft has various virtual machine types and pricing options, with both Linux and Windows, all of which are configured for specific use cases. For PyTorch, it is highly recommended that you use the GPU-optimized virtual machines. They are tailored for the high compute needs of machine learning.

          + +

          The expense of your virtual machine is directly correlated to the number of GPUs that it contains. The NC6 virtual machine is, for example, one of the smallest, cheapest virtual machines and can actually be suitable for many use cases.

          + +

          Installing PyTorch From Scratch

          + +

          You may prefer to start with a bare virtual machine to install PyTorch. Once you have connected to your virtual machine, setting up PyTorch is the same as setting up locally for your operating system of choice.

          + +
          +
          +

          Using PyTorch with Lightning Studios

          + +

          Lightning Studios let you fully experience PyTorch and its ecosystem on accelerated compute in seconds. You can pick a GPU and customize from your browser or any local IDE with zero setup.

          + +

          Lightning Studios provide:

          + +
            +
          • ready-to-use environments that come with PyTorch and PyTorch Lightning pre-installed
          • +
          • accelerated computing on GPUs such as L4, L40S, and H100, and the ability to switch between them in seconds
          • +
          • optimized multi-node training, to scale up PyTorch training jobs across machines
          • +
          + +

Lightning Studios enable you to share fully reproducible environments preloaded with everything you need to build AI systems, such as data processing, pretraining, finetuning, and inference. Our library of 2K community-built, open-sourced templates has pre-installed dependencies, model weights, data, code, and more.

          + +

          Getting Started

          + + + +

          With Studios, you can:

          + +
            +
          • Pay-as-you-go
          • +
          • Get GPUs from $0.40 p/h
          • +
          • Use your own AWS credits
          • +
          • Access 24/7 Enterprise support
          • +
          + +

          Build AI, not infrastructure

          + +

With Lightning Studios, you can easily build AI products with full-code and low-code tools in one place, plus access GPUs, train models, and deploy.

          + +

AI products like Stable Diffusion and NVIDIA’s NeMo are built with Lightning. Whether you are experimenting with your first model, building an AI app, or deploying AI at enterprise scale, Lightning powers every stage, even pretraining LLMs on 10,000+ GPUs.

          +
          +
          + + + + + + + + + + +
          +
          +
          +
          + +
          +
          +
          + +
          +
          +
          +
          +

          Docs

          +

          Access comprehensive developer documentation for PyTorch

          + View Docs +
          + +
          +

          Tutorials

          +

          Get in-depth tutorials for beginners and advanced developers

          + View Tutorials +
          + +
          +

          Resources

          +

          Find development resources and get your questions answered

          + View Resources +
          +
          +
          +
          + +
          + +
          + +
          +
          +
          +
          + + +
          +
          +
          + + +
          + + + + + + + + + + + + + + + + + + + + + + + diff --git a/get-started/colab/index.html b/get-started/colab/index.html new file mode 100644 index 000000000000..81f641656e57 --- /dev/null +++ b/get-started/colab/index.html @@ -0,0 +1,407 @@ + + + + + + + + + + + + + Try Now via CoLab | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          +
          + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
          +
          +
          +
          + + +
          + + + + + + + + +
          + +
          +
          + + +
          + + + +
          +
          +

          Get Started

          + +

          + Select preferences and run the command to install PyTorch locally, or + get started quickly with one of the supported cloud platforms. +

          +
          +
          + +
          +
          + + +
          + + +
          +

          Try Now via CoLab

          + +

          Lorem ipsum dolor sit amet, ex mei graeco alienum imperdiet. Recusabo consequuntur mei ei, habeo iriure virtute eam cu, in erat placerat vis. Eu mea nostrum inimicus, cum id aeque utamur erroribus.

          + +

          Lorem ipsum dolor sit amet, ex mei graeco alienum imperdiet. Recusabo consequuntur mei ei, habeo iriure virtute eam cu, in erat placerat vis. Eu mea nostrum inimicus, cum id aeque utamur erroribus.

          + +
          #!/usr/bin/python3
          +
          +# Print the contents of the files listed on the command line.
          +
          +import sys
          +
          +for fn in sys.argv[1:]:
          +    try:
          +        fin = open(fn, 'r')
          +    except:
          +        (type, detail) = sys.exc_info()[:2]
          +        print("\n*** %s: %s: %s ***" % (fn, type, detail))
          +        continue
          +    print("\n*** Contents of", fn, "***")
          +
          +    # Print the file, with line numbers.
          +    lno = 1
          +    while 1:
          +        line = fin.readline()
          +        if not line: break;
          +        print('%3d: %-s' % (lno, line[:-1]))
          +        lno = lno + 1
          +    fin.close()
          +print()
          + +

          Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

          + + + + + + + + + +
          +
          + + +
          +
          +
          + +
          +
          +
          +
          +

          Docs

          +

          Access comprehensive developer documentation for PyTorch

          + View Docs +
          + +
          +

          Tutorials

          +

          Get in-depth tutorials for beginners and advanced developers

          + View Tutorials +
          + +
          +

          Resources

          +

          Find development resources and get your questions answered

          + View Resources +
          +
          +
          +
          + +
          + +
          + +
          +
          +
          +
          + + +
          +
          +
          + + +
          + + + + + + + + + + + + + + + + + + + + + + + diff --git a/get-started/executorch/index.html b/get-started/executorch/index.html new file mode 100644 index 000000000000..12a6c9f9837f --- /dev/null +++ b/get-started/executorch/index.html @@ -0,0 +1,400 @@ + + + + + + + + + + + + + PyTorch for Edge | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          +
          + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
          +
          +
          +
          + + +
          + + + + + + + + +
          + +
          +
          + + +
          + + + +
          +
          +

          Get Started

          + +

          + Select preferences and run the command to install PyTorch locally, or + get started quickly with one of the supported cloud platforms. +

          +
          +
          + +
          +
          + + +
          + + +
          +

          Get Started with PyTorch ExecuTorch

          + +

ExecuTorch is PyTorch’s edge-specific library, designed to be lightweight and highly performant even on devices with constrained hardware such as mobile phones, embedded systems, and microcontrollers.

          + +

          ExecuTorch relies heavily on PyTorch core technologies such as torch.compile and torch.export, and should be very familiar to anyone who has used PyTorch in the past.
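For a sense of that flow, here is a hedged sketch that exports a tiny module with torch.export and lowers it to an ExecuTorch program; the executorch.exir helpers follow the ExecuTorch getting-started docs and should be treated as assumptions here.

import torch
from executorch.exir import to_edge

class Add(torch.nn.Module):
    def forward(self, x, y):
        return x + y

# Export with torch.export, lower to the edge dialect, then serialize a .pte program for the runtime.
exported = torch.export.export(Add(), (torch.ones(2), torch.ones(2)))
et_program = to_edge(exported).to_executorch()

with open("add.pte", "wb") as f:
    f.write(et_program.buffer)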

          + +

          Getting Started

          +

          You can get started by following the general getting started guide or jump to the specific steps for your target device.

          + + + +

          Hardware Acceleration

          +

          ExecuTorch provides out of the box hardware acceleration for a growing number of chip manufacturers. See the following resources to learn more about how to leverage them:

          + + + + + + + +
          +
          + + +
          +
          +
          + +
          +
          +
          +
          +

          Docs

          +

          Access comprehensive developer documentation for PyTorch

          + View Docs +
          + +
          +

          Tutorials

          +

          Get in-depth tutorials for beginners and advanced developers

          + View Tutorials +
          + +
          +

          Resources

          +

          Find development resources and get your questions answered

          + View Resources +
          +
          +
          +
          + +
          + +
          + +
          +
          +
          +
          + + +
          +
          +
          + + +
          + + + + + + + + + + + + + + + + + + + + + + + diff --git a/get-started/index.html b/get-started/index.html new file mode 100644 index 000000000000..2f545f7f1db8 --- /dev/null +++ b/get-started/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

          Redirecting…

          + Click here if you are not redirected. + diff --git a/get-started/locally/index.html b/get-started/locally/index.html new file mode 100644 index 000000000000..fb2561635686 --- /dev/null +++ b/get-started/locally/index.html @@ -0,0 +1,815 @@ + + + + + + + + + + + + + Start Locally | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
          +
          + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
          +
          +
          +
          + + +
          + + + + + + + + +
          + +
          +
          + + +
          + + + +
          +
          +

          Get Started

          + +

          + Select preferences and run the command to install PyTorch locally, or + get started quickly with one of the supported cloud platforms. +

          +
          +
          + +
          +
          + + +
          +
          +
          +
          +

          Shortcuts

          +
            +
            + + +
            +
            +
            +
            +

            Start Locally

            + +
            +
            +
            +

            Select your preferences and run the install command. Stable represents the most currently tested and supported version of PyTorch. This should + be suitable for many users. Preview is available if you want the latest, not fully tested and supported, builds that are generated nightly. + Please ensure that you have met the prerequisites below (e.g., numpy), depending on your package manager. You can also + install previous versions of PyTorch. Note that LibTorch is only available for C++. +

            + +

            NOTE: Latest PyTorch requires Python 3.9 or later.

            + +
            +
            +
            +
            PyTorch Build
            +
            +
            +
            Your OS
            +
            +
            +
            Package
            +
            +
            +
            Language
            +
            +
            +
            Compute Platform
            +
            +
            +
            Run this Command:
            +
            +
            + +
            +
            +
            +
            PyTorch Build
            +
            +
            +
            Stable (1.13.0)
            +
            +
            +
            Preview (Nightly)
            +
            +
            +
            +
            +
            Your OS
            +
            +
            +
            Linux
            +
            +
            +
            Mac
            +
            +
            +
            Windows
            +
            +
            +
            +
            +
            Package
            +
            +
            +
            Pip
            +
            +
            +
            LibTorch
            +
            +
            +
            Source
            +
            +
            +
            +
            +
            Language
            +
            +
            +
            Python
            +
            +
            +
            C++ / Java
            +
            +
            +
            +
            +
            Compute Platform
            +
            +
            +
            CUDA 11.8
            +
            +
            +
            CUDA 12.1
            +
            +
            +
            CUDA 12.4
            +
            +
            +
            ROCm 5.2
            +
            +
            +
            CPU
            +
            +
            +
            +
            +
            Run this Command:
            +
            +
            +
            pip install torch torchvision
            +
            +
            +
            +
            +
            + +
            +
            +
            + +
            + +
            +
            +

            Installing on macOS

            + +

            PyTorch can be installed and used on macOS. Depending on your system and GPU capabilities, your experience with PyTorch on macOS may vary in terms of processing time.

            + +

            Prerequisites

            + +

            macOS Version

            + +

            PyTorch is supported on macOS 10.15 (Catalina) or above.

            + +

            Python

            + +

            It is recommended that you use Python 3.9 - 3.12. +You can install Python either through Homebrew or +the Python website.

            + +

            Package Manager

            + +

            To install the PyTorch binaries, you will need to use the supported package manager: pip.

            +

            pip

            + +

            Python 3

            + +

            If you installed Python via Homebrew or the Python website, pip was installed with it. If you installed Python 3.x, then you will be using the command pip3.

            + +
            +

            Tip: If you want to use just the command pip, instead of pip3, you can symlink pip to the pip3 binary.

            +
            + +

            Installation

            + +

            pip

            + +

            To install PyTorch via pip, use the following command, depending on your Python version:

            + +
            # Python 3.x
            +pip3 install torch torchvision
            +
            + +

            Verification

            + +

            To ensure that PyTorch was installed correctly, we can verify the installation by running sample PyTorch code. Here we will construct a randomly initialized tensor.

            + +
            import torch
            +x = torch.rand(5, 3)
            +print(x)
            +
            + +

            The output should be something similar to:

            + +
            tensor([[0.3380, 0.3845, 0.3217],
            +        [0.8337, 0.9050, 0.2650],
            +        [0.2979, 0.7141, 0.9069],
            +        [0.1449, 0.1132, 0.1375],
            +        [0.4675, 0.3947, 0.1426]])
            +
            + +

            Building from source

            + +

            For the majority of PyTorch users, installing from a pre-built binary via a package manager will provide the best experience. However, there are times when you may want to install the bleeding edge PyTorch code, whether for testing or actual development on the PyTorch core. To install the latest PyTorch code, you will need to build PyTorch from source.

            + +

            Prerequisites

            + +
              +
            1. [Optional] Install pip
            2. +
            3. Follow the steps described here: https://github.com/pytorch/pytorch#from-source
            4. +
            + +

            You can verify the installation as described above.

            + +
            +
            +

            Installing on Linux

            + +

            PyTorch can be installed and used on various Linux distributions. Depending on your system and compute requirements, your experience with PyTorch on Linux may vary in terms of processing time. It is recommended, but not required, that your Linux system has an NVIDIA or AMD GPU in order to harness the full power of PyTorch’s CUDA support or ROCm support.

            + +

            Prerequisites

            + +

            Supported Linux Distributions

            + +

            PyTorch is supported on Linux distributions that use glibc >= v2.17, which include the following:

            + + + +
            +

            The install instructions here will generally apply to all supported Linux distributions. An example difference is that your distribution may support yum instead of apt. The specific examples shown were run on an Ubuntu 18.04 machine.

            +
            + +

            Python

            + +

            Python 3.9-3.12 is generally installed by default on any of our supported Linux distributions, which meets our recommendation.

            + +
            +

            Tip: By default, you will have to use the command python3 to run Python. If you want to use just the command python, instead of python3, you can symlink python to the python3 binary.

            +
            + +

            However, if you want to install another version, there are multiple ways:

            + + + +

            If you decide to use APT, you can run the following command to install it:

            + +
            sudo apt install python
            +
            + +

            Package Manager

            + +

            To install the PyTorch binaries, you will need to use the supported package manager: pip.

            +

            pip

            + +

            Python 3

            + +

            While Python 3.x is installed by default on Linux, pip is not installed by default.

            + +
            sudo apt install python3-pip
            +
            + +
            +

            Tip: If you want to use just the command pip, instead of pip3, you can symlink pip to the pip3 binary.

            +
            + +

            Installation

            + +

            pip

            + +

            No CUDA

            + +

To install PyTorch via pip when you do not have a CUDA-capable or ROCm-capable system, or do not require CUDA/ROCm (i.e., GPU support), choose OS: Linux, Package: Pip, Language: Python, and Compute Platform: CPU in the selector above. Then, run the command that is presented to you.

            + +

            With CUDA

            + +

To install PyTorch via pip on a CUDA-capable system, choose OS: Linux, Package: Pip, Language: Python, and the CUDA version suited to your machine in the selector above. Often, the latest CUDA version is better. Then, run the command that is presented to you.

            + +

            With ROCm

            + +

            To install PyTorch via pip on a ROCm-capable system, choose OS: Linux, Package: Pip, Language: Python and the supported ROCm version in the selector above. Then run the command that is presented to you.

            + +
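            The exact command depends on the options you pick in the selector, but for recent releases it typically takes a form like the following (the index URLs and CUDA/ROCm versions here are illustrative only; always prefer the command the selector generates for you):

            # CPU only
            pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
            # CUDA (example: CUDA 12.6 wheels)
            pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu126
            # ROCm (example: ROCm 6.2.4 wheels, Linux only)
            pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.2.4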

            Verification

            + +

            To ensure that PyTorch was installed correctly, we can verify the installation by running sample PyTorch code. Here we will construct a randomly initialized tensor.

            + +
            import torch
            +x = torch.rand(5, 3)
            +print(x)
            +
            + +

            The output should be something similar to:

            + +
            tensor([[0.3380, 0.3845, 0.3217],
            +        [0.8337, 0.9050, 0.2650],
            +        [0.2979, 0.7141, 0.9069],
            +        [0.1449, 0.1132, 0.1375],
            +        [0.4675, 0.3947, 0.1426]])
            +
            + +

            Additionally, to check if your GPU driver and CUDA/ROCm stack are enabled and accessible by PyTorch, run the following commands, which return whether or not the GPU driver is enabled (the ROCm build of PyTorch uses the same semantics at the Python API level, so the commands below also work for ROCm):

            + +
            import torch
            +torch.cuda.is_available()
            +
            + +
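            If that call returns False, it can help to confirm from the shell that the driver stack itself is healthy before debugging the PyTorch install. A rough check, assuming the vendor tools are on your PATH (nvidia-smi ships with the NVIDIA driver, rocm-smi with ROCm):

            # NVIDIA: should list your GPU, driver version and supported CUDA version
            nvidia-smi
            # AMD: should list your GPU and ROCm driver status
            rocm-smi
            # Then confirm what PyTorch itself sees
            python3 -c "import torch; print(torch.version.cuda, torch.cuda.device_count())"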

            Building from source

            + +

            For the majority of PyTorch users, installing from a pre-built binary via a package manager will provide the best experience. However, there are times when you may want to install the bleeding edge PyTorch code, whether for testing or actual development on the PyTorch core. To install the latest PyTorch code, you will need to build PyTorch from source.

            + +

            Prerequisites

            + +
            1. Install pip
            2. If you need to build PyTorch with GPU support:
               a. for NVIDIA GPUs, install CUDA, if your machine has a CUDA-enabled GPU
               b. for AMD GPUs, install ROCm, if your machine has a ROCm-enabled GPU
            3. Follow the steps described here: https://github.com/pytorch/pytorch#from-source (a condensed sketch of these steps follows this list)
            + +
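            As a condensed sketch of what those upstream instructions involve (the authoritative, up-to-date steps live at the link above and can change between releases):

            git clone --recursive https://github.com/pytorch/pytorch
            cd pytorch
            # Install build-time Python dependencies
            pip install -r requirements.txt
            # Compile and install PyTorch in development (editable) mode
            python setup.py develop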

            You can verify the installation as described above.

            + +
            +
            +

            Installing on Windows

            + +

            PyTorch can be installed and used on various Windows distributions. Depending on your system and compute requirements, your experience with PyTorch on Windows may vary in terms of processing time. It is recommended, but not required, that your Windows system has an NVIDIA GPU in order to harness the full power of PyTorch’s CUDA support.

            + +

            Prerequisites

            + +

            Supported Windows Distributions

            + +

            PyTorch is supported on the following Windows distributions:

            + + + +
            +

            The install instructions here will generally apply to all supported Windows distributions. The specific examples shown were run on a Windows 10 Enterprise machine.

            +
            + +

            Python

            + +

            Currently, PyTorch on Windows only supports Python 3.9-3.12; Python 2.x is not supported.

            + +

            As it is not installed by default on Windows, there are multiple ways to install Python:

            + + + +
            +

            If you decide to use Chocolatey, and haven’t installed Chocolatey yet, ensure that you are running your command prompt as an administrator.

            +
            + +

            For a Chocolatey-based install, run the following command in an administrative command prompt:

            + +
            choco install python
            +
            + +

            Package Manager

            + +

            To install the PyTorch binaries, you will need to use the supported package manager: pip.

            +

            pip

            + +

            If you installed Python by any of the recommended ways above, pip will have already been installed for you.

            + +
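            You can confirm both from a command prompt; a quick check such as the following should print the Python and pip versions that were installed:

            python --version
            python -m pip --version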

            Installation

            + +

            pip

            + +

            No CUDA

            + +

            To install PyTorch via pip when you do not have a CUDA-capable system or do not require CUDA, choose OS: Windows, Package: Pip and CUDA: None in the selector above. Then run the command that is presented to you.

            + +

            With CUDA

            + +

            To install PyTorch via pip on a CUDA-capable system, choose OS: Windows, Package: Pip and the CUDA version suited to your machine in the selector above. Often, the latest CUDA version is better. Then run the command that is presented to you.

            + +

            Verification

            + +

            To ensure that PyTorch was installed correctly, we can verify the installation by running sample PyTorch code. Here we will construct a randomly initialized tensor.

            + +

            From the command line, type:

            + +
            python
            +
            + +

            then enter the following code:

            + +
            import torch
            +x = torch.rand(5, 3)
            +print(x)
            +
            + +

            The output should be something similar to:

            + +
            tensor([[0.3380, 0.3845, 0.3217],
            +        [0.8337, 0.9050, 0.2650],
            +        [0.2979, 0.7141, 0.9069],
            +        [0.1449, 0.1132, 0.1375],
            +        [0.4675, 0.3947, 0.1426]])
            +
            + +

            Additionally, to check if your GPU driver and CUDA are enabled and accessible by PyTorch, run the following commands, which return whether or not the CUDA driver is enabled:

            + +
            import torch
            +torch.cuda.is_available()
            +
            + +

            Building from source

            + +

            For the majority of PyTorch users, installing from a pre-built binary via a package manager will provide the best experience. However, there are times when you may want to install the bleeding edge PyTorch code, whether for testing or actual development on the PyTorch core. To install the latest PyTorch code, you will need to build PyTorch from source.

            + +

            Prerequisites

            + +
            1. Install pip
            2. Install CUDA, if your machine has a CUDA-enabled GPU.
            3. Visual Studio with the MSVC toolset and NVTX are also needed to build on Windows. The exact requirements for those dependencies can be found here.
            4. Follow the steps described here: https://github.com/pytorch/pytorch#from-source
            + +

            You can verify the installation as described above.

            + +
            +
            diff --git a/get-started/previous-versions/index.html b/get-started/previous-versions/index.html new file mode 100644 index 000000000000..2681af144ea3 --- /dev/null +++ b/get-started/previous-versions/index.html @@ -0,0 +1,2201 @@ Previous PyTorch Versions | PyTorch

            Get Started

            + +

            Select preferences and run the command to install PyTorch locally, or get started quickly with one of the supported cloud platforms.

            +
            +
            + +
            +
            + + +
            + + +
            +

            Installing previous versions of PyTorch

            + +

            We’d prefer you install the latest version, but old binaries and installation instructions are provided below for your convenience.

            + +

            Commands for Versions >= 1.0.0

            + +
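            After installing any of the pinned combinations below, you can confirm which versions actually ended up in your environment with a quick check such as:

            python -c "import torch, torchvision, torchaudio; print(torch.__version__, torchvision.__version__, torchaudio.__version__)"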

            v2.6.0

            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0
            +
            + +
            Linux and Windows
            + +
            # ROCM 6.1 (Linux only)
            +pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/rocm6.1
            +# ROCM 6.2.4 (Linux only)
            +pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/rocm6.2.4
            +# CUDA 11.8
            +pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/cu118
            +# CUDA 12.4
            +pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/cu124
            +# CUDA 12.6
            +pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/cu126
            +# CPU only
            +pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v2.5.1

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.8
            +conda install pytorch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1  pytorch-cuda=11.8 -c pytorch -c nvidia
            +# CUDA 12.1
            +conda install pytorch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 pytorch-cuda=12.1 -c pytorch -c nvidia
            +# CUDA 12.4
            +conda install pytorch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 pytorch-cuda=12.4 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1
            +
            + +
            Linux and Windows
            + +
            # ROCM 6.1 (Linux only)
            +pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/rocm6.1
            +# ROCM 6.2 (Linux only)
            +pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/rocm6.2
            +# CUDA 11.8
            +pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu118
            +# CUDA 12.1
            +pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu121
            +# CUDA 12.4
            +pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu124
            +# CPU only
            +pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v2.5.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.8
            +conda install pytorch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0  pytorch-cuda=11.8 -c pytorch -c nvidia
            +# CUDA 12.1
            +conda install pytorch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 pytorch-cuda=12.1 -c pytorch -c nvidia
            +# CUDA 12.4
            +conda install pytorch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 pytorch-cuda=12.4 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0
            +
            + +
            Linux and Windows
            + +
            # ROCM 6.1 (Linux only)
            +pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/rocm6.1
            +# ROCM 6.2 (Linux only)
            +pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/rocm6.2
            +# CUDA 11.8
            +pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/cu118
            +# CUDA 12.1
            +pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/cu121
            +# CUDA 12.4
            +pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/cu124
            +# CPU only
            +pip install torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v2.4.1

            +

            Conda

            +
            OSX
            +
            # conda
            +conda install pytorch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 -c pytorch
            +
            +
            Linux and Windows
            +
            # CUDA 11.8
            +conda install pytorch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1  pytorch-cuda=11.8 -c pytorch -c nvidia
            +# CUDA 12.1
            +conda install pytorch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 pytorch-cuda=12.1 -c pytorch -c nvidia
            +# CUDA 12.4
            +conda install pytorch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 pytorch-cuda=12.4 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 cpuonly -c pytorch
            +
            +

            Wheel

            +
            OSX
            +
            pip install torch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1
            +
            +
            Linux and Windows
            +
            # ROCM 6.1 (Linux only)
            +pip install torch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 --index-url https://download.pytorch.org/whl/rocm6.1
            +# CUDA 11.8
            +pip install torch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 --index-url https://download.pytorch.org/whl/cu118
            +# CUDA 12.1
            +pip install torch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 --index-url https://download.pytorch.org/whl/cu121
            +# CUDA 12.4
            +pip install torch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 --index-url https://download.pytorch.org/whl/cu124
            +# CPU only
            +pip install torch==2.4.1 torchvision==0.19.1 torchaudio==2.4.1 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v2.4.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.8
            +conda install pytorch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0  pytorch-cuda=11.8 -c pytorch -c nvidia
            +# CUDA 12.1
            +conda install pytorch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 pytorch-cuda=12.1 -c pytorch -c nvidia
            +# CUDA 12.4
            +conda install pytorch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 pytorch-cuda=12.4 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0
            +
            + +
            Linux and Windows
            + +
            # ROCM 6.1 (Linux only)
            +pip install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/rocm6.1
            +# CUDA 11.8
            +pip install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cu118
            +# CUDA 12.1
            +pip install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cu121
            +# CUDA 12.4
            +pip install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cu124
            +# CPU only
            +pip install torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v2.3.1

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.8
            +conda install pytorch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 pytorch-cuda=11.8 -c pytorch -c nvidia
            +# CUDA 12.1
            +conda install pytorch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 pytorch-cuda=12.1 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1
            +
            + +
            Linux and Windows
            + +
            # ROCM 6.0 (Linux only)
            +pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/rocm6.0
            +# CUDA 11.8
            +pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cu118
            +# CUDA 12.1
            +pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cu121
            +# CPU only
            +pip install torch==2.3.1 torchvision==0.18.1 torchaudio==2.3.1 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v2.3.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.8
            +conda install pytorch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 pytorch-cuda=11.8 -c pytorch -c nvidia
            +# CUDA 12.1
            +conda install pytorch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 pytorch-cuda=12.1 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0
            +
            + +
            Linux and Windows
            + +
            # ROCM 6.0 (Linux only)
            +pip install torch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/rocm6.0
            +# CUDA 11.8
            +pip install torch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/cu118
            +# CUDA 12.1
            +pip install torch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/cu121
            +# CPU only
            +pip install torch==2.3.0 torchvision==0.18.0 torchaudio==2.3.0 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v2.2.2

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.8
            +conda install pytorch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 pytorch-cuda=11.8 -c pytorch -c nvidia
            +# CUDA 12.1
            +conda install pytorch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 pytorch-cuda=12.1 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2
            +
            + +
            Linux and Windows
            + +
            # ROCM 5.7 (Linux only)
            +pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/rocm5.7
            +# CUDA 11.8
            +pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cu118
            +# CUDA 12.1
            +pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cu121
            +# CPU only
            +pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v2.2.1

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.8
            +conda install pytorch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 pytorch-cuda=11.8 -c pytorch -c nvidia
            +# CUDA 12.1
            +conda install pytorch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 pytorch-cuda=12.1 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1
            +
            + +
            Linux and Windows
            + +
            # ROCM 5.7 (Linux only)
            +pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/rocm5.7
            +# CUDA 11.8
            +pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu118
            +# CUDA 12.1
            +pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cu121
            +# CPU only
            +pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v2.2.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.8
            +conda install pytorch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 pytorch-cuda=11.8 -c pytorch -c nvidia
            +# CUDA 12.1
            +conda install pytorch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 pytorch-cuda=12.1 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0
            +
            + +
            Linux and Windows
            + +
            # ROCM 5.6 (Linux only)
            +pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 --index-url https://download.pytorch.org/whl/rocm5.6
            +# CUDA 11.8
            +pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 --index-url https://download.pytorch.org/whl/cu118
            +# CUDA 12.1
            +pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 --index-url https://download.pytorch.org/whl/cu121
            +# CPU only
            +pip install torch==2.2.0 torchvision==0.17.0 torchaudio==2.2.0 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v2.1.2

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.8
            +conda install pytorch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 pytorch-cuda=11.8 -c pytorch -c nvidia
            +# CUDA 12.1
            +conda install pytorch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 pytorch-cuda=12.1 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2
            +
            + +
            Linux and Windows
            + +
            # ROCM 5.6 (Linux only)
            +pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 --index-url https://download.pytorch.org/whl/rocm5.6
            +# CUDA 11.8
            +pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 --index-url https://download.pytorch.org/whl/cu118
            +# CUDA 12.1
            +pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 --index-url https://download.pytorch.org/whl/cu121
            +# CPU only
            +pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v2.1.1

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.8
            +conda install pytorch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 pytorch-cuda=11.8 -c pytorch -c nvidia
            +# CUDA 12.1
            +conda install pytorch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 pytorch-cuda=12.1 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1
            +
            + +
            Linux and Windows
            + +
            # ROCM 5.6 (Linux only)
            +pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/rocm5.6
            +# CUDA 11.8
            +pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/cu118
            +# CUDA 12.1
            +pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/cu121
            +# CPU only
            +pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v2.1.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.8
            +conda install pytorch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 pytorch-cuda=11.8 -c pytorch -c nvidia
            +# CUDA 12.1
            +conda install pytorch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 pytorch-cuda=12.1 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0
            +
            + +
            Linux and Windows
            + +
            # ROCM 5.6 (Linux only)
            +pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/rocm5.6
            +# CUDA 11.8
            +pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cu118
            +# CUDA 12.1
            +pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cu121
            +# CPU only
            +pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v2.0.1

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.7
            +conda install pytorch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 pytorch-cuda=11.7 -c pytorch -c nvidia
            +# CUDA 11.8
            +conda install pytorch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 pytorch-cuda=11.8 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2
            +
            + +
            Linux and Windows
            + +
            # ROCM 5.4.2 (Linux only)
            +pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/rocm5.4.2
            +# CUDA 11.7
            +pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2
            +# CUDA 11.8
            +pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118
            +# CPU only
            +pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v2.0.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==2.0.0 torchvision==0.15.0 torchaudio==2.0.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.7
            +conda install pytorch==2.0.0 torchvision==0.15.0 torchaudio==2.0.0 pytorch-cuda=11.7 -c pytorch -c nvidia
            +# CUDA 11.8
            +conda install pytorch==2.0.0 torchvision==0.15.0 torchaudio==2.0.0 pytorch-cuda=11.8 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==2.0.0 torchvision==0.15.0 torchaudio==2.0.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1
            +
            + +
            Linux and Windows
            + +
            # ROCM 5.4.2 (Linux only)
            +pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1 --index-url https://download.pytorch.org/whl/rocm5.4.2
            +# CUDA 11.7
            +pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1
            +# CUDA 11.8
            +pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1 --index-url https://download.pytorch.org/whl/cu118
            +# CPU only
            +pip install torch==2.0.0 torchvision==0.15.1 torchaudio==2.0.1 --index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v1.13.1

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.6
            +conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 pytorch-cuda=11.6 -c pytorch -c nvidia
            +# CUDA 11.7
            +conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 pytorch-cuda=11.7 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1
            +
            + +
            Linux and Windows
            + +
            # ROCM 5.2 (Linux only)
            +pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/rocm5.2
            +# CUDA 11.6
            +pip install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116
            +# CUDA 11.7
            +pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu117
            +# CPU only
            +pip install torch==1.13.1+cpu torchvision==0.14.1+cpu torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v1.13.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.13.0 torchvision==0.14.0 torchaudio==0.13.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.6
            +conda install pytorch==1.13.0 torchvision==0.14.0 torchaudio==0.13.0 pytorch-cuda=11.6 -c pytorch -c nvidia
            +# CUDA 11.7
            +conda install pytorch==1.13.0 torchvision==0.14.0 torchaudio==0.13.0 pytorch-cuda=11.7 -c pytorch -c nvidia
            +# CPU Only
            +conda install pytorch==1.13.0 torchvision==0.14.0 torchaudio==0.13.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.13.0 torchvision==0.14.0 torchaudio==0.13.0
            +
            + +
            Linux and Windows
            + +
            # ROCM 5.2 (Linux only)
            +pip install torch==1.13.0+rocm5.2 torchvision==0.14.0+rocm5.2 torchaudio==0.13.0 --extra-index-url https://download.pytorch.org/whl/rocm5.2
            +# CUDA 11.6
            +pip install torch==1.13.0+cu116 torchvision==0.14.0+cu116 torchaudio==0.13.0 --extra-index-url https://download.pytorch.org/whl/cu116
            +# CUDA 11.7
            +pip install torch==1.13.0+cu117 torchvision==0.14.0+cu117 torchaudio==0.13.0 --extra-index-url https://download.pytorch.org/whl/cu117
            +# CPU only
            +pip install torch==1.13.0+cpu torchvision==0.14.0+cpu torchaudio==0.13.0 --extra-index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v1.12.1

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.2
            +conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=10.2 -c pytorch
            +# CUDA 11.3
            +conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=11.3 -c pytorch
            +# CUDA 11.6
            +conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=11.6 -c pytorch -c conda-forge
            +# CPU Only
            +conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1
            +
            + +
            Linux and Windows
            + +
            # ROCM 5.1.1 (Linux only)
            +pip install torch==1.12.1+rocm5.1.1 torchvision==0.13.1+rocm5.1.1 torchaudio==0.12.1 --extra-index-url  https://download.pytorch.org/whl/rocm5.1.1
            +# CUDA 11.6
            +pip install torch==1.12.1+cu116 torchvision==0.13.1+cu116 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu116
            +# CUDA 11.3
            +pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113
            +# CUDA 10.2
            +pip install torch==1.12.1+cu102 torchvision==0.13.1+cu102 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu102
            +# CPU only
            +pip install torch==1.12.1+cpu torchvision==0.13.1+cpu torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v1.12.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.2
            +conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 cudatoolkit=10.2 -c pytorch
            +# CUDA 11.3
            +conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 cudatoolkit=11.3 -c pytorch
            +# CUDA 11.6
            +conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 cudatoolkit=11.6 -c pytorch -c conda-forge
            +# CPU Only
            +conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0
            +
            + +
            Linux and Windows
            + +
            # ROCM 5.1.1 (Linux only)
            +pip install torch==1.12.0+rocm5.1.1 torchvision==0.13.0+rocm5.1.1 torchaudio==0.12.0 --extra-index-url  https://download.pytorch.org/whl/rocm5.1.1
            +# CUDA 11.6
            +pip install torch==1.12.0+cu116 torchvision==0.13.0+cu116 torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cu116
            +# CUDA 11.3
            +pip install torch==1.12.0+cu113 torchvision==0.13.0+cu113 torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cu113
            +# CUDA 10.2
            +pip install torch==1.12.0+cu102 torchvision==0.13.0+cu102 torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cu102
            +# CPU only
            +pip install torch==1.12.0+cpu torchvision==0.13.0+cpu torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v1.11.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.2
            +conda install pytorch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0 cudatoolkit=10.2 -c pytorch
            +
            +# CUDA 11.3
            +conda install pytorch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0 cudatoolkit=11.3 -c pytorch
            +
            +# CPU Only
            +conda install pytorch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0
            +
            + +
            Linux and Windows
            + +
            # ROCM 4.5.2 (Linux only)
            +pip install torch==1.11.0+rocm4.5.2 torchvision==0.12.0+rocm4.5.2 torchaudio==0.11.0 --extra-index-url  https://download.pytorch.org/whl/rocm4.5.2
            +
            +# CUDA 11.3
            +pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cu113
            +
            +# CUDA 10.2
            +pip install torch==1.11.0+cu102 torchvision==0.12.0+cu102 torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cu102
            +
            +# CPU only
            +pip install torch==1.11.0+cpu torchvision==0.12.0+cpu torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cpu
            +
            + +

            v1.10.1

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.10.1 torchvision==0.11.2 torchaudio==0.10.1 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.2
            +conda install pytorch==1.10.1 torchvision==0.11.2 torchaudio==0.10.1 cudatoolkit=10.2 -c pytorch
            +
            +# CUDA 11.3
            +conda install pytorch==1.10.1 torchvision==0.11.2 torchaudio==0.10.1 cudatoolkit=11.3 -c pytorch -c conda-forge
            +
            +# CPU Only
            +conda install pytorch==1.10.1 torchvision==0.11.2 torchaudio==0.10.1 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.10.1 torchvision==0.11.2 torchaudio==0.10.1
            +
            + +
            Linux and Windows
            + +
            # ROCM 4.2 (Linux only)
            +pip install torch==1.10.1+rocm4.2 torchvision==0.11.2+rocm4.2 torchaudio==0.10.1 -f https://download.pytorch.org/whl/rocm4.2/torch_stable.html
            +
            +# ROCM 4.1 (Linux only)
            +pip install torch==1.10.1+rocm4.1 torchvision==0.11.2+rocm4.1 torchaudio==0.10.1 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# ROCM 4.0.1 (Linux only)
            +pip install torch==1.10.1+rocm4.0.1 torchvision==0.10.2+rocm4.0.1 torchaudio==0.10.1 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 11.1
            +pip install torch==1.10.1+cu111 torchvision==0.11.2+cu111 torchaudio==0.10.1 -f https://download.pytorch.org/whl/cu111/torch_stable.html
            +
            +# CUDA 10.2
            +pip install torch==1.10.1+cu102 torchvision==0.11.2+cu102 torchaudio==0.10.1 -f https://download.pytorch.org/whl/cu102/torch_stable.html
            +
            +# CPU only
            +pip install torch==1.10.1+cpu torchvision==0.11.2+cpu torchaudio==0.10.1 -f https://download.pytorch.org/whl/cpu/torch_stable.html
            +
            + +

            v1.10.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.10.0 torchvision==0.11.0 torchaudio==0.10.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.2
            +conda install pytorch==1.10.0 torchvision==0.11.0 torchaudio==0.10.0 cudatoolkit=10.2 -c pytorch
            +
            +# CUDA 11.3
            +conda install pytorch==1.10.0 torchvision==0.11.0 torchaudio==0.10.0 cudatoolkit=11.3 -c pytorch -c conda-forge
            +
            +# CPU Only
            +conda install pytorch==1.10.0 torchvision==0.11.0 torchaudio==0.10.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.10.0 torchvision==0.11.0 torchaudio==0.10.0
            +
            + +
            Linux and Windows
            + +
            # ROCM 4.2 (Linux only)
            +pip install torch==1.10.0+rocm4.2 torchvision==0.11.0+rocm4.2 torchaudio==0.10.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# ROCM 4.1 (Linux only)
            +pip install torch==1.10.0+rocm4.1 torchvision==0.11.0+rocm4.1 torchaudio==0.10.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# ROCM 4.0.1 (Linux only)
            +pip install torch==1.10.0+rocm4.0.1 torchvision==0.10.1+rocm4.0.1 torchaudio==0.10.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 11.1
            +pip install torch==1.10.0+cu111 torchvision==0.11.0+cu111 torchaudio==0.10.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 10.2
            +pip install torch==1.10.0+cu102 torchvision==0.11.0+cu102 torchaudio==0.10.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CPU only
            +pip install torch==1.10.0+cpu torchvision==0.11.0+cpu torchaudio==0.10.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            + +

            v1.9.1

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.9.1 torchvision==0.10.1 torchaudio==0.9.1 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.2
            +conda install pytorch==1.9.1 torchvision==0.10.1 torchaudio==0.9.1 cudatoolkit=10.2 -c pytorch
            +
            +# CUDA 11.3
            +conda install pytorch==1.9.1 torchvision==0.10.1 torchaudio==0.9.1 cudatoolkit=11.3 -c pytorch -c conda-forge
            +
            +# CPU Only
            +conda install pytorch==1.9.1 torchvision==0.10.1 torchaudio==0.9.1 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.9.1 torchvision==0.10.1 torchaudio==0.9.1
            +
            + +
            Linux and Windows
            + +
            # ROCM 4.2 (Linux only)
            +pip install torch==1.9.1+rocm4.2 torchvision==0.10.1+rocm4.2 torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# ROCM 4.1 (Linux only)
            +pip install torch==1.9.1+rocm4.1 torchvision==0.10.1+rocm4.1 torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# ROCM 4.0.1 (Linux only)
            +pip install torch==1.9.1+rocm4.0.1 torchvision==0.10.1+rocm4.0.1 torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 11.1
            +pip install torch==1.9.1+cu111 torchvision==0.10.1+cu111 torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 10.2
            +pip install torch==1.9.1+cu102 torchvision==0.10.1+cu102 torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CPU only
            +pip install torch==1.9.1+cpu torchvision==0.10.1+cpu torchaudio==0.9.1 -f https://download.pytorch.org/whl/torch_stable.html
            +
            + +

            v1.9.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.9.0 torchvision==0.10.0 torchaudio==0.9.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.2
            +conda install pytorch==1.9.0 torchvision==0.10.0 torchaudio==0.9.0 cudatoolkit=10.2 -c pytorch
            +
            +# CUDA 11.3
            +conda install pytorch==1.9.0 torchvision==0.10.0 torchaudio==0.9.0 cudatoolkit=11.3 -c pytorch -c conda-forge
            +
            +# CPU Only
            +conda install pytorch==1.9.0 torchvision==0.10.0 torchaudio==0.9.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.9.0 torchvision==0.10.0 torchaudio==0.9.0
            +
            + +
            Linux and Windows
            + +
            # ROCM 4.2 (Linux only)
            +pip install torch==1.9.0+rocm4.2 torchvision==0.10.0+rocm4.2 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# ROCM 4.1 (Linux only)
            +pip install torch==1.9.0+rocm4.1 torchvision==0.10.0+rocm4.1 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# ROCM 4.0.1 (Linux only)
            +pip install torch==1.9.0+rocm4.0.1 torchvision==0.10.0+rocm4.0.1 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 11.1
            +pip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 10.2
            +pip install torch==1.9.0+cu102 torchvision==0.10.0+cu102 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CPU only
            +pip install torch==1.9.0+cpu torchvision==0.10.0+cpu torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            + +

            v1.8.2 with LTS support

            + +

            Conda

            + +
            OSX
            + +

            macOS is currently not supported for LTS.

            + +
            Linux and Windows
            + +
            # CUDA 10.2
            +# NOTE: PyTorch LTS version 1.8.2 is only supported for Python <= 3.8.
            +conda install pytorch torchvision torchaudio cudatoolkit=10.2 -c pytorch-lts
            +
            +# CUDA 11.1 (Linux)
            +# NOTE: 'nvidia' channel is required for cudatoolkit 11.1. NOTE: PyTorch LTS version 1.8.2 is only supported for Python <= 3.8.
            +conda install pytorch torchvision torchaudio cudatoolkit=11.1 -c pytorch-lts -c nvidia
            +
            +# CUDA 11.1 (Windows)
            +# NOTE: 'conda-forge' channel is required for cudatoolkit 11.1. NOTE: PyTorch LTS version 1.8.2 is only supported for Python <= 3.8.
            +conda install pytorch torchvision torchaudio cudatoolkit=11.1 -c pytorch-lts -c conda-forge
            +
            +# CPU Only
            +# Pytorch LTS version 1.8.2 is only supported for Python <= 3.8.
            +conda install pytorch torchvision torchaudio cpuonly -c pytorch-lts
            +
            +# ROCM5.x
            +
            +Not supported in LTS.
            +
            + +

            Wheel

            + +
            OSX
            + +

            macOS is currently not supported in LTS.

            + +
            Linux and Windows
            + +
            # CUDA 10.2
            +pip3 install torch==1.8.2 torchvision==0.9.2 torchaudio==0.8.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cu102
            +
            +# CUDA 11.1
            +pip3 install torch==1.8.2 torchvision==0.9.2 torchaudio==0.8.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cu111
            +
            +# CPU Only
            +pip3 install torch==1.8.2 torchvision==0.9.2 torchaudio==0.8.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cpu
            +
            +# ROCM5.x
            +
            +Not supported in LTS.
            +
            + +

            v1.8.1

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.8.1 torchvision==0.9.1 torchaudio==0.8.1 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.2
            +conda install pytorch==1.8.1 torchvision==0.9.1 torchaudio==0.8.1 cudatoolkit=10.2 -c pytorch
            +
            +# CUDA 11.3
            +conda install pytorch==1.8.1 torchvision==0.9.1 torchaudio==0.8.1 cudatoolkit=11.3 -c pytorch -c conda-forge
            +
            +# CPU Only
            +conda install pytorch==1.8.1 torchvision==0.9.1 torchaudio==0.8.1 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.8.1 torchvision==0.9.1 torchaudio==0.8.1
            +
            + +
            Linux and Windows
            + +
            # ROCM 4.0.1 (Linux only)
            +pip install torch==1.8.1+rocm4.0.1 torchvision==0.9.1+rocm4.0.1 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# ROCM 3.10 (Linux only)
            +pip install torch==1.8.1+rocm3.10 torchvision==0.9.1+rocm3.10 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 11.1
            +pip install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 10.2
            +pip install torch==1.8.1+cu102 torchvision==0.9.1+cu102 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 10.1
            +pip install torch==1.8.1+cu101 torchvision==0.9.1+cu101 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CPU only
            +pip install torch==1.8.1+cpu torchvision==0.9.1+cpu torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html
            +
            + +

            v1.8.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.2
            +conda install pytorch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 cudatoolkit=10.2 -c pytorch
            +
            +# CUDA 11.1
            +conda install pytorch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 cudatoolkit=11.1 -c pytorch -c conda-forge
            +
            +# CPU Only
            +conda install pytorch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0
            +
            + +
            Linux and Windows
            + +
# ROCM 4.0.1 (Linux only)
            +pip install torch -f https://download.pytorch.org/whl/rocm4.0.1/torch_stable.html
            +pip install ninja
            +pip install 'git+https://github.com/pytorch/vision.git@v0.9.0'
            +
            +# CUDA 11.1
            +pip install torch==1.8.0+cu111 torchvision==0.9.0+cu111 torchaudio==0.8.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 10.2
            +pip install torch==1.8.0 torchvision==0.9.0 torchaudio==0.8.0
            +
            +# CPU only
            +pip install torch==1.8.0+cpu torchvision==0.9.0+cpu torchaudio==0.8.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            + +

            v1.7.1

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 9.2
            +conda install pytorch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 cudatoolkit=9.2 -c pytorch
            +
            +# CUDA 10.1
            +conda install pytorch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 cudatoolkit=10.1 -c pytorch
            +
            +# CUDA 10.2
            +conda install pytorch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 cudatoolkit=10.2 -c pytorch
            +
            +# CUDA 11.0
            +conda install pytorch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 cudatoolkit=11.0 -c pytorch
            +
            +# CPU Only
            +conda install pytorch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.0
            +pip install torch==1.7.1+cu110 torchvision==0.8.2+cu110 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 10.2
            +pip install torch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2
            +
            +# CUDA 10.1
            +pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 9.2
            +pip install torch==1.7.1+cu92 torchvision==0.8.2+cu92 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CPU only
            +pip install torch==1.7.1+cpu torchvision==0.8.2+cpu torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
            +
            + +

            v1.7.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 9.2
            +conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=9.2 -c pytorch
            +
            +# CUDA 10.1
            +conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=10.1 -c pytorch
            +
            +# CUDA 10.2
            +conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=10.2 -c pytorch
            +
            +# CUDA 11.0
            +conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=11.0 -c pytorch
            +
            +# CPU Only
            +conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0
            +
            + +
            Linux and Windows
            + +
            # CUDA 11.0
            +pip install torch==1.7.0+cu110 torchvision==0.8.0+cu110 torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 10.2
            +pip install torch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0
            +
            +# CUDA 10.1
            +pip install torch==1.7.0+cu101 torchvision==0.8.0+cu101 torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 9.2
            +pip install torch==1.7.0+cu92 torchvision==0.8.0+cu92 torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CPU only
            +pip install torch==1.7.0+cpu torchvision==0.8.0+cpu torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html
            +
            + +

            v1.6.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.6.0 torchvision==0.7.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 9.2
            +conda install pytorch==1.6.0 torchvision==0.7.0 cudatoolkit=9.2 -c pytorch
            +
            +# CUDA 10.1
            +conda install pytorch==1.6.0 torchvision==0.7.0 cudatoolkit=10.1 -c pytorch
            +
            +# CUDA 10.2
            +conda install pytorch==1.6.0 torchvision==0.7.0 cudatoolkit=10.2 -c pytorch
            +
            +# CPU Only
            +conda install pytorch==1.6.0 torchvision==0.7.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.6.0 torchvision==0.7.0
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.2
            +pip install torch==1.6.0 torchvision==0.7.0
            +
            +# CUDA 10.1
            +pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 9.2
            +pip install torch==1.6.0+cu92 torchvision==0.7.0+cu92 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CPU only
            +pip install torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
            +
            + +

            v1.5.1

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.5.1 torchvision==0.6.1 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 9.2
            +conda install pytorch==1.5.1 torchvision==0.6.1 cudatoolkit=9.2 -c pytorch
            +
            +# CUDA 10.1
            +conda install pytorch==1.5.1 torchvision==0.6.1 cudatoolkit=10.1 -c pytorch
            +
            +# CUDA 10.2
            +conda install pytorch==1.5.1 torchvision==0.6.1 cudatoolkit=10.2 -c pytorch
            +
            +# CPU Only
            +conda install pytorch==1.5.1 torchvision==0.6.1 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.5.1 torchvision==0.6.1
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.2
            +pip install torch==1.5.1 torchvision==0.6.1
            +
            +# CUDA 10.1
            +pip install torch==1.5.1+cu101 torchvision==0.6.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 9.2
            +pip install torch==1.5.1+cu92 torchvision==0.6.1+cu92 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CPU only
            +pip install torch==1.5.1+cpu torchvision==0.6.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
            +
            + +

            v1.5.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.5.0 torchvision==0.6.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 9.2
            +conda install pytorch==1.5.0 torchvision==0.6.0 cudatoolkit=9.2 -c pytorch
            +
            +# CUDA 10.1
            +conda install pytorch==1.5.0 torchvision==0.6.0 cudatoolkit=10.1 -c pytorch
            +
            +# CUDA 10.2
            +conda install pytorch==1.5.0 torchvision==0.6.0 cudatoolkit=10.2 -c pytorch
            +
            +# CPU Only
            +conda install pytorch==1.5.0 torchvision==0.6.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.5.0 torchvision==0.6.0
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.2
            +pip install torch==1.5.0 torchvision==0.6.0
            +
            +# CUDA 10.1
            +pip install torch==1.5.0+cu101 torchvision==0.6.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CUDA 9.2
            +pip install torch==1.5.0+cu92 torchvision==0.6.0+cu92 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CPU only
            +pip install torch==1.5.0+cpu torchvision==0.6.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
            +
            + +

            v1.4.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.4.0 torchvision==0.5.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 9.2
            +conda install pytorch==1.4.0 torchvision==0.5.0 cudatoolkit=9.2 -c pytorch
            +
            +# CUDA 10.1
            +conda install pytorch==1.4.0 torchvision==0.5.0 cudatoolkit=10.1 -c pytorch
            +
            +# CPU Only
            +conda install pytorch==1.4.0 torchvision==0.5.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.4.0 torchvision==0.5.0
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.1
            +pip install torch==1.4.0 torchvision==0.5.0
            +
            +# CUDA 9.2
            +pip install torch==1.4.0+cu92 torchvision==0.5.0+cu92 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CPU only
            +pip install torch==1.4.0+cpu torchvision==0.5.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
            +
            + +

            v1.2.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.2.0 torchvision==0.4.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 9.2
            +conda install pytorch==1.2.0 torchvision==0.4.0 cudatoolkit=9.2 -c pytorch
            +
            +# CUDA 10.0
            +conda install pytorch==1.2.0 torchvision==0.4.0 cudatoolkit=10.0 -c pytorch
            +
            +# CPU Only
            +conda install pytorch==1.2.0 torchvision==0.4.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.2.0 torchvision==0.4.0
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.0
            +pip install torch==1.2.0 torchvision==0.4.0
            +
            +# CUDA 9.2
            +pip install torch==1.2.0+cu92 torchvision==0.4.0+cu92 -f https://download.pytorch.org/whl/torch_stable.html
            +
            +# CPU only
            +pip install torch==1.2.0+cpu torchvision==0.4.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
            +
            + +

            v1.1.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.1.0 torchvision==0.3.0 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 9.0
            +conda install pytorch==1.1.0 torchvision==0.3.0 cudatoolkit=9.0 -c pytorch
            +
            +# CUDA 10.0
            +conda install pytorch==1.1.0 torchvision==0.3.0 cudatoolkit=10.0 -c pytorch
            +
            +# CPU Only
            +conda install pytorch-cpu==1.1.0 torchvision-cpu==0.3.0 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.1.0 torchvision==0.3.0
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.0
            +Download and install wheel from https://download.pytorch.org/whl/cu100/torch_stable.html
            +
            +# CUDA 9.0
            +Download and install wheel from https://download.pytorch.org/whl/cu90/torch_stable.html
            +
            +# CPU only
            +Download and install wheel from https://download.pytorch.org/whl/cpu/torch_stable.html
            +
            + +

            v1.0.1

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.0.1 torchvision==0.2.2 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 9.0
            +conda install pytorch==1.0.1 torchvision==0.2.2 cudatoolkit=9.0 -c pytorch
            +
            +# CUDA 10.0
            +conda install pytorch==1.0.1 torchvision==0.2.2 cudatoolkit=10.0 -c pytorch
            +
            +# CPU Only
            +conda install pytorch-cpu==1.0.1 torchvision-cpu==0.2.2 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.0.1 torchvision==0.2.2
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.0
            +Download and install wheel from https://download.pytorch.org/whl/cu100/torch_stable.html
            +
            +# CUDA 9.0
            +Download and install wheel from https://download.pytorch.org/whl/cu90/torch_stable.html
            +
            +# CPU only
            +Download and install wheel from https://download.pytorch.org/whl/cpu/torch_stable.html
            +
            + +

            v1.0.0

            + +

            Conda

            + +
            OSX
            + +
            # conda
            +conda install pytorch==1.0.0 torchvision==0.2.1 -c pytorch
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.0
            +conda install pytorch==1.0.0 torchvision==0.2.1 cuda100 -c pytorch
            +
            +# CUDA 9.0
            +conda install pytorch==1.0.0 torchvision==0.2.1 cuda90 -c pytorch
            +
            +# CUDA 8.0
            +conda install pytorch==1.0.0 torchvision==0.2.1 cuda80 -c pytorch
            +
            +# CPU Only
            +conda install pytorch-cpu==1.0.0 torchvision-cpu==0.2.1 cpuonly -c pytorch
            +
            + +

            Wheel

            + +
            OSX
            + +
            pip install torch==1.0.0 torchvision==0.2.1
            +
            + +
            Linux and Windows
            + +
            # CUDA 10.0
            +Download and install wheel from https://download.pytorch.org/whl/cu100/torch_stable.html
            +
            +# CUDA 9.0
            +Download and install wheel from https://download.pytorch.org/whl/cu90/torch_stable.html
            +
            +# CUDA 8.0
            +Download and install wheel from https://download.pytorch.org/whl/cu80/torch_stable.html
            +
            +# CPU only
            +Download and install wheel from https://download.pytorch.org/whl/cpu/torch_stable.html
            +
            + +

            Commands for Versions < 1.0.0

            + +

            Via conda

            + +
            +

            This should be used for most previous macOS version installs.

            +
            + +

To install a previous version of PyTorch via Anaconda or Miniconda, replace “0.4.1” in the following commands with the desired version (e.g., “0.2.0”).

            + +

            Installing with CUDA 9

            + +

            conda install pytorch=0.4.1 cuda90 -c pytorch

            + +

            or

            + +

            conda install pytorch=0.4.1 cuda92 -c pytorch

            + +

            Installing with CUDA 8

            + +

            conda install pytorch=0.4.1 cuda80 -c pytorch

            + +

            Installing with CUDA 7.5

            + +

            conda install pytorch=0.4.1 cuda75 -c pytorch

            + +

            Installing without CUDA

            + +

            conda install pytorch=0.4.1 -c pytorch

            + +

            From source

            + +

It is possible to check out an older version of PyTorch and build it. You can list the tags in the PyTorch git repository with git tag and check out a particular one (replace ‘0.1.9’ with the desired version) with

            + +

            git checkout v0.1.9

            + +

Follow the install-from-source instructions in the README.md of the PyTorch checkout.

            + +

            Via pip

            + +

            Download the whl file with the desired version from the following html pages:

            + + + +

            Then, install the file with pip install [downloaded file]

            + +

Note: most PyTorch versions are available only for specific CUDA versions. For example, pytorch=1.0.1 is not available for CUDA 9.2.

            + +

            (Old) PyTorch Linux binaries compiled with CUDA 7.5

            + +

These predate the HTML page above and have to be installed manually by downloading the wheel file and running pip install downloaded_file.

            + + + +

            Windows binaries

            + + + +

            Mac and misc. binaries

            + +

            For recent macOS binaries, use conda:

            + +

            e.g.,

            + +

conda install pytorch=0.4.1 cuda90 -c pytorch
+conda install pytorch=0.4.1 cuda92 -c pytorch
+conda install pytorch=0.4.1 cuda80 -c pytorch
+conda install pytorch=0.4.1 -c pytorch # No CUDA

diff --git a/get-started/pytorch-2.0/index.html b/get-started/pytorch-2.0/index.html new file mode 100644 index 000000000000..291f42aca478 --- /dev/null +++ b/get-started/pytorch-2.0/index.html @@ -0,0 +1,1104 @@ PyTorch 2.x | PyTorch

            Get Started

            + +

            + Select preferences and run the command to install PyTorch locally, or + get started quickly with one of the supported cloud platforms. +

            +
            +
            + +
            +
            + + +
            +
            + +
            +
            +

            Overview

            + +

            Introducing PyTorch 2.0, our first steps toward the next generation 2-series release of PyTorch. Over the last few years we have innovated and iterated from PyTorch 1.0 to the most recent 1.13 and moved to the newly formed PyTorch Foundation, part of the Linux Foundation.

            + +

PyTorch’s biggest strength, beyond our amazing community, is that we continue to offer first-class Python integration, an imperative style, and a simple, flexible API. PyTorch 2.0 offers the same eager-mode development and user experience, while fundamentally changing and supercharging how PyTorch operates at the compiler level under the hood. This lets us provide faster performance and support for Dynamic Shapes and Distributed.

            + +

Below you will find all the information you need to better understand what PyTorch 2.0 is, where it’s going and, more importantly, how to get started today (e.g., tutorial, requirements, models, common FAQs). There is still a lot to learn and develop, but we are looking forward to community feedback and contributions to make the 2-series better, and we thank everyone who has made the 1-series so successful.

            + +

            PyTorch 2.x: faster, more pythonic and as dynamic as ever

            + +

            Today, we announce torch.compile, a feature that pushes PyTorch performance to new heights and starts the move for parts of PyTorch from C++ back into Python. We believe that this is a substantial new direction for PyTorch – hence we call it 2.0. torch.compile is a fully additive (and optional) feature and hence 2.0 is 100% backward compatible by definition.

            + +

            Underpinning torch.compile are new technologies – TorchDynamo, AOTAutograd, PrimTorch and TorchInductor.

            + +
              +
            • +

              TorchDynamo captures PyTorch programs safely using Python Frame Evaluation Hooks and is a significant innovation that was a result of 5 years of our R&D into safe graph capture

              +
            • +
            • +

              AOTAutograd overloads PyTorch’s autograd engine as a tracing autodiff for generating ahead-of-time backward traces.

              +
            • +
            • PrimTorch canonicalizes ~2000+ PyTorch operators down to a closed set of ~250 primitive operators that developers can target to build a complete PyTorch backend. This substantially lowers the barrier of writing a PyTorch feature or backend.
            • +
            • TorchInductor is a deep learning compiler that generates fast code for multiple accelerators and backends. For NVIDIA and AMD GPUs, it uses OpenAI Triton as a key building block.
            • +
            + +

            TorchDynamo, AOTAutograd, PrimTorch and TorchInductor are written in Python and support dynamic shapes (i.e. the ability to send in Tensors of different sizes without inducing a recompilation), making them flexible, easily hackable and lowering the barrier of entry for developers and vendors.

            + +

            To validate these technologies, we used a diverse set of 163 open-source models across various machine learning domains. We built this benchmark carefully to include tasks such as Image Classification, Object Detection, Image Generation, various NLP tasks such as Language Modeling, Q&A, Sequence Classification, Recommender Systems and Reinforcement Learning. We separate the benchmarks into three categories:

            + +
              +
            • 46 models from HuggingFace Transformers
            • +
            • 61 models from TIMM: a collection of state-of-the-art PyTorch image models by Ross Wightman
            • +
            • 56 models from TorchBench: a curated set of popular code-bases from across github
            • +
            + + + +

            We don’t modify these open-source models except to add a torch.compile call wrapping them.

            + +

We then measure speedups and validate accuracy across these models. Since speedups can depend on data type, we measure speedups on both float32 and Automatic Mixed Precision (AMP). We report an unevenly weighted average speedup of 0.75 * AMP + 0.25 * float32, since we find AMP is more common in practice.
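
As a concrete illustration of how that headline number is formed, here is a minimal sketch; the per-precision speedups below are placeholders, not measured results.

# Hypothetical per-precision speedups (compiled vs. eager); placeholder values only.
amp_speedup = 1.51       # measured with Automatic Mixed Precision
float32_speedup = 1.21   # measured at float32

# AMP is weighted more heavily because it is more common in practice.
weighted_speedup = 0.75 * amp_speedup + 0.25 * float32_speedup
print(f"reported speedup: {weighted_speedup:.2f}x")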

            + +

Across these 163 open-source models torch.compile works 93% of the time, and the model runs 43% faster in training on an NVIDIA A100 GPU. At float32 precision, it runs 21% faster on average, and at AMP precision it runs 51% faster on average.

            + +

Caveats: On a desktop-class GPU such as an NVIDIA 3090, we’ve measured that speedups are lower than on server-class GPUs such as A100. As of today, our default backend TorchInductor supports CPUs and NVIDIA Volta and Ampere GPUs. It does not (yet) support other GPUs, xPUs or older NVIDIA GPUs.

            + +

            + +

            Speedups for torch.compile against eager mode on an NVIDIA A100 GPU
            +

            + +

            Try it: torch.compile is in the early stages of development. Starting today, you can try out torch.compile in the nightly binaries. We expect to ship the first stable 2.0 release in early March 2023.

            + +

            In the roadmap of PyTorch 2.x we hope to push the compiled mode further and further in terms of performance and scalability. Some of this work is in-flight, as we talked about at the Conference today. Some of this work has not started yet. Some of this work is what we hope to see, but don’t have the bandwidth to do ourselves. If you are interested in contributing, come chat with us at the Ask the Engineers: 2.0 Live Q&A Series starting this month (details at the end of this post) and/or via Github / Forums.

            + +

            + +

            + +

            Testimonials

            + +

            Here is what some of PyTorch’s users have to say about our new direction:

            + +

Sylvain Gugger, the primary maintainer of HuggingFace transformers:

            + +

“With just one line of code to add, PyTorch 2.0 gives a speedup between 1.5x and 2x in training Transformers models. This is the most exciting thing since mixed precision training was introduced!”

            + +

Ross Wightman, the primary maintainer of TIMM (one of the largest vision model hubs within the PyTorch ecosystem):

            + +

“It just works out of the box with the majority of TIMM models for inference and train workloads with no code changes”

            + +

Luca Antiga, the CTO of Lightning AI and one of the primary maintainers of PyTorch Lightning:

            + +

            “PyTorch 2.0 embodies the future of deep learning frameworks. The possibility to capture a PyTorch program with effectively no user intervention and get massive on-device speedups and program manipulation out of the box unlocks a whole new dimension for AI developers.”

            + +

            Motivation

            + +

            Our philosophy on PyTorch has always been to keep flexibility and hackability our top priority, and performance as a close second. We strived for:

            + +
              +
            1. High-Performance eager execution
            2. +
            3. Pythonic internals
            4. +
            5. Good abstractions for Distributed, Autodiff, Data loading, Accelerators, etc.
            6. +
            + +

Since we launched PyTorch in 2017, hardware accelerators (such as GPUs) have become ~15x faster in compute and about ~2x faster in the speed of memory access. So, to keep eager execution at high performance, we’ve had to move substantial parts of PyTorch internals into C++. Moving internals into C++ makes them less hackable and increases the barrier of entry for code contributions.

            + +

From day one, we knew the performance limits of eager execution. In July 2017, we started our first research project into developing a Compiler for PyTorch. The compiler needed to make a PyTorch program fast, but not at the cost of the PyTorch experience. Our key criterion was to preserve certain kinds of flexibility – support for dynamic shapes and dynamic programs which researchers use in various stages of exploration.

            + +

            + +

            + +

            Technology Overview

            + +

            Over the years, we’ve built several compiler projects within PyTorch. Let us break down the compiler into three parts:

            + +
              +
            • graph acquisition
            • +
            • graph lowering
            • +
            • graph compilation
            • +
            + +

Graph acquisition was the hardest challenge when building a PyTorch compiler.

            + +

In the past 5 years, we built torch.jit.trace, TorchScript, FX tracing, Lazy Tensors. But none of them felt like they gave us everything we wanted. Some were flexible but not fast, some were fast but not flexible and some were neither fast nor flexible. Some had a bad user experience (such as being silently wrong). While TorchScript was promising, it needed substantial changes to your code and to the code that your code depended on. This need for substantial change in code made it a non-starter for a lot of PyTorch users.

            + +

            + +

            The PyTorch compilation process
            +

            + +

            TorchDynamo: Acquiring Graphs reliably and fast

            + +

Earlier this year, we started working on TorchDynamo, an approach that uses a CPython feature introduced in PEP-0523 called the Frame Evaluation API. We took a data-driven approach to validate its effectiveness on Graph Capture. We used 7,000+ Github projects written in PyTorch as our validation set. While TorchScript and others struggled to even acquire the graph 50% of the time, often with a big overhead, TorchDynamo acquired the graph 99% of the time, correctly, safely and with negligible overhead – without needing any changes to the original code. This is when we knew that we had finally broken through the barrier we had been struggling with for many years in terms of flexibility and speed.
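
To get a feel for the graphs TorchDynamo captures, you can plug in a trivial custom backend that only prints the captured FX graph and then runs it eagerly. This is a minimal sketch for illustration, not part of the original post.

import torch
import torch.fx

# TorchDynamo hands a custom backend the captured torch.fx.GraphModule plus example inputs.
# Here we just inspect the graph and return the unmodified callable, so execution stays eager.
def inspect_backend(gm: torch.fx.GraphModule, example_inputs):
    print(gm.graph)   # show the operations TorchDynamo captured
    return gm.forward

@torch.compile(backend=inspect_backend)
def fn(x, y):
    return torch.sin(x) + torch.cos(y)

fn(torch.randn(8), torch.randn(8))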

            + +

            TorchInductor: fast codegen using a define-by-run IR

            + +

            For a new compiler backend for PyTorch 2.0, we took inspiration from how our users were writing high performance custom kernels: increasingly using the Triton language. We also wanted a compiler backend that used similar abstractions to PyTorch eager, and was general purpose enough to support the wide breadth of features in PyTorch. TorchInductor uses a pythonic define-by-run loop level IR to automatically map PyTorch models into generated Triton code on GPUs and C++/OpenMP on CPUs. TorchInductor’s core loop level IR contains only ~50 operators, and it is implemented in Python, making it easily hackable and extensible.

            + +

            AOTAutograd: reusing Autograd for ahead-of-time graphs

            + +

            For PyTorch 2.0, we knew that we wanted to accelerate training. Thus, it was critical that we not only captured user-level code, but also that we captured backpropagation. Moreover, we knew that we wanted to reuse the existing battle-tested PyTorch autograd system. AOTAutograd leverages PyTorch’s torch_dispatch extensibility mechanism to trace through our Autograd engine, allowing us to capture the backwards pass “ahead-of-time”. This allows us to accelerate both our forwards and backwards pass using TorchInductor.
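
One hedged way to see AOTAutograd in the loop is to compile with the aot_eager backend, which runs TorchDynamo and AOTAutograd but executes the resulting forward and backward graphs eagerly instead of handing them to TorchInductor; a small sketch:

import torch

model = torch.nn.Linear(16, 4)

# "aot_eager" exercises graph capture (TorchDynamo) plus ahead-of-time autograd
# tracing (AOTAutograd) without TorchInductor code generation.
compiled = torch.compile(model, backend="aot_eager")

x = torch.randn(8, 16, requires_grad=True)
loss = compiled(x).sum()
loss.backward()   # the backward pass runs through the ahead-of-time traced graph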

            + +

            PrimTorch: Stable Primitive operators

            + +

            Writing a backend for PyTorch is challenging. PyTorch has 1200+ operators, and 2000+ if you consider various overloads for each operator.

            + +

            + +

            A breakdown of the 2000+ PyTorch operators
            +

            + +

            Hence, writing a backend or a cross-cutting feature becomes a draining endeavor. Within the PrimTorch project, we are working on defining smaller and stable operator sets. PyTorch programs can consistently be lowered to these operator sets. We aim to define two operator sets:

            + +
              +
            • Prim ops with about ~250 operators, which are fairly low-level. These are suited for compilers because they are low-level enough that you need to fuse them back together to get good performance.
            • +
            • ATen ops with about ~750 canonical operators and suited for exporting as-is. These are suited for backends that already integrate at the ATen level or backends that won’t have compilation to recover performance from a lower-level operator set like Prim ops.
            • +
            + +

We discuss this topic in more detail below, in the Developer/Vendor Experience section.

            + +

            User Experience

            + +

            We introduce a simple function torch.compile that wraps your model and returns a compiled model.

            + +
            compiled_model = torch.compile(model)
            +
            + +

            This compiled_model holds a reference to your model and compiles the forward function to a more optimized version. When compiling the model, we give a few knobs to adjust it:

            + +
            def torch.compile(model: Callable,
            +  *,
            +  mode: Optional[str] = "default",
            +  dynamic: bool = False,
+  fullgraph: bool = False,
            +  backend: Union[str, Callable] = "inductor",
            +  # advanced backend options go here as kwargs
            +  **kwargs
            +) -> torch._dynamo.NNOptimizedModule
            +
            + +
              +
            • +

              mode specifies what the compiler should be optimizing while compiling.

              + +
                +
              • The default mode is a preset that tries to compile efficiently without taking too long to compile or using extra memory.
              • +
              • Other modes such as reduce-overhead reduce the framework overhead by a lot more, but cost a small amount of extra memory. max-autotune compiles for a long time, trying to give you the fastest code it can generate.
              • +
              +
            • +
            • dynamic specifies whether to enable the code path for Dynamic Shapes. Certain compiler optimizations cannot be applied to dynamic shaped programs. Making it explicit whether you want a compiled program with dynamic shapes or with static shapes will help the compiler give you better optimized code.
            • +
• fullgraph is similar to Numba’s nopython. It compiles the entire program into a single graph or gives an error explaining why it could not do so. Most users don’t need to use this mode. If you are very performance-conscious, then you may want to try it.
• +
• backend specifies which compiler backend to use. By default, TorchInductor is used, but there are a few others available (see the sketch after this list).
            • +
            + +
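
A small sketch of listing the registered backends and picking one explicitly; the exact list varies by build and install.

import torch
import torch._dynamo as dynamo

# Show the compiler backends TorchDynamo knows about in this build.
print(dynamo.list_backends())

# "inductor" is the default; passing it explicitly is equivalent to the default.
model = torch.nn.Linear(8, 8)
compiled_model = torch.compile(model, backend="inductor")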

            + +

            + +

The compile experience intends to deliver the most benefits and the most flexibility in the default mode. Here is a mental model of what you get in each mode.

            + +

            Now, let us look at a full example of compiling a real model and running it (with random data)

            + +
            import torch
            +import torchvision.models as models
            +
            +model = models.resnet18().cuda()
            +optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
            +compiled_model = torch.compile(model)
            +
            +x = torch.randn(16, 3, 224, 224).cuda()
            +optimizer.zero_grad()
            +out = compiled_model(x)
            +out.sum().backward()
            +optimizer.step()
            +
            + +

The first time you run compiled_model(x), it compiles the model, so that call takes longer. Subsequent runs are fast.
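
A quick way to see the one-time compilation cost is to time a few consecutive calls; a sketch assuming a CUDA GPU, with timings that will vary by hardware:

import time
import torch
import torchvision.models as models

model = models.resnet18().cuda()
compiled_model = torch.compile(model)
x = torch.randn(16, 3, 224, 224).cuda()

for i in range(3):
    torch.cuda.synchronize()
    start = time.perf_counter()
    compiled_model(x)
    torch.cuda.synchronize()
    print(f"run {i}: {time.perf_counter() - start:.3f}s")  # run 0 includes compilation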

            + +

            Modes

            + +

The compiler has a few presets that tune the compiled model in different ways. You might be running a small model that is slow because of framework overhead. Or, you might be running a large model that barely fits into memory. Depending on your need, you might want to use a different mode.

            + +
            # API NOT FINAL
            +# default: optimizes for large models, low compile-time
            +#          and no extra memory usage
            +torch.compile(model)
            +
            +# reduce-overhead: optimizes to reduce the framework overhead
            +#                and uses some extra memory. Helps speed up small models
            +torch.compile(model, mode="reduce-overhead")
            +
            +# max-autotune: optimizes to produce the fastest model,
            +#               but takes a very long time to compile
            +torch.compile(model, mode="max-autotune")
            +
            +
            + +

            Reading and updating Attributes

            + +

Accessing model attributes works as it would in eager mode. You can access or modify attributes of your model (such as model.conv1.weight) as you generally would. This is completely safe and sound in terms of code correctness. TorchDynamo inserts guards into the code to check if its assumptions hold true. If attributes change in certain ways, then TorchDynamo knows to recompile automatically as needed.

            + +
            # optimized_model works similar to model, feel free to access its attributes and modify them
            +optimized_model.conv1.weight.fill_(0.01)
            +
            +# this change is reflected in model
            +
            + +

            Hooks

            + +

            Module and Tensor hooks don’t fully work at the moment, but they will eventually work as we finish development.

            + +

            Serialization

            + +

            You can serialize the state-dict of the optimized_model OR the model. They point to the same parameters and state and hence are equivalent.

            + +
            torch.save(optimized_model.state_dict(), "foo.pt")
            +# both these lines of code do the same thing
            +torch.save(model.state_dict(), "foo.pt")
            +
            + +

            You cannot serialize optimized_model currently. If you wish to save the object directly, save model instead.

            + +
            torch.save(optimized_model, "foo.pt") # Error
            +torch.save(model, "foo.pt")           # Works
            +
            + +

            Inference and Export

            + +

            For model inference, after generating a compiled model using torch.compile, run some warm-up steps before actual model serving. This helps mitigate latency spikes during initial serving.
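
A minimal warm-up loop for serving might look like the sketch below; the model, the number of warm-up iterations, and the use of torch.no_grad are illustrative choices, not requirements from the post.

import torch

model = torch.nn.Sequential(
    torch.nn.Linear(128, 256), torch.nn.ReLU(), torch.nn.Linear(256, 10)
).eval()
compiled_model = torch.compile(model)

example_input = torch.randn(32, 128)
with torch.no_grad():
    for _ in range(3):                 # warm-up triggers compilation before real traffic
        compiled_model(example_input)

    prediction = compiled_model(example_input)   # subsequent calls have stable latency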

            + +

            In addition, we will be introducing a mode called torch.export that carefully exports the entire model and the guard infrastructure for environments that need guaranteed and predictable latency. torch.export would need changes to your program, especially if you have data dependent control-flow.

            + +
            # API Not Final
            +exported_model = torch._dynamo.export(model, input)
            +torch.save(exported_model, "foo.pt")
            +
            + +

            This is in early stages of development. Catch the talk on Export Path at the PyTorch Conference for more details. You can also engage on this topic at our “Ask the Engineers: 2.0 Live Q&A Series” starting this month (more details at the end of this post).

            + +

            Debugging Issues

            + +

            A compiled mode is opaque and hard to debug. You will have questions such as:

            + +
              +
            • Why is my program crashing in compiled mode?
            • +
            • Is compiled mode as accurate as eager mode?
            • +
            • Why am I not seeing speedups?
            • +
            + +

            If compiled mode produces an error or a crash or diverging results from eager mode (beyond machine precision limits), it is very unlikely that it is your code’s fault. However, understanding what piece of code is the reason for the bug is useful.

            + +

            To aid in debugging and reproducibility, we have created several tools and logging capabilities out of which one stands out: The Minifier.

            + +

            The minifier automatically reduces the issue you are seeing to a small snippet of code. This small snippet of code reproduces the original issue and you can file a github issue with the minified code. This will help the PyTorch team fix the issue easily and quickly.

            + +

            If you are not seeing the speedups that you expect, then we have the torch._dynamo.explain tool that explains which parts of your code induced what we call “graph breaks”. Graph breaks generally hinder the compiler from speeding up the code, and reducing the number of graph breaks likely will speed up your code (up to some limit of diminishing returns).
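
A hedged sketch of using that tool, assuming the 2.0-era call form torch._dynamo.explain(fn, *inputs); later releases switched to torch._dynamo.explain(fn)(*inputs).

import torch
import torch._dynamo as dynamo

def fn(x):
    y = torch.sin(x)
    print("this print causes a graph break")   # unsupported inside a captured graph
    return torch.cos(y)

# Printing the explain output shows how many graphs were captured and why they broke.
report = dynamo.explain(fn, torch.randn(8))
print(report)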

            + +

            You can read about these and more in our troubleshooting guide.

            + +

            Dynamic Shapes

            + +

            When looking at what was necessary to support the generality of PyTorch code, one key requirement was supporting dynamic shapes, and allowing models to take in tensors of different sizes without inducing recompilation every time the shape changes.

            + +

As of today, support for Dynamic Shapes is limited and a rapidly evolving work in progress. It will be fully featured by the stable release. It is gated behind a dynamic=True argument, and we have more progress on a feature branch (symbolic-shapes), on which we have successfully run BERT_pytorch in training with full symbolic shapes with TorchInductor. For inference with dynamic shapes, we have more coverage. For example, let’s look at a common setting where dynamic shapes are helpful - text generation with language models.

            + +

            We can see that even when the shape changes dynamically from 4 all the way to 256, Compiled mode is able to consistently outperform eager by up to 40%. Without support for dynamic shapes, a common workaround is to pad to the nearest power of two. However, as we can see from the charts below, it incurs a significant amount of performance overhead, and also results in significantly longer compilation time. Moreover, padding is sometimes non-trivial to do correctly.
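
A sketch of exercising the dynamic=True knob described above; the model and sequence lengths are illustrative, not the benchmark setup.

import torch

model = torch.nn.Sequential(torch.nn.Embedding(1000, 64), torch.nn.Linear(64, 64))
compiled_model = torch.compile(model, dynamic=True)

# Feed batches whose sequence length changes from call to call; with dynamic shape
# support the compiler should avoid recompiling for every new length.
for seq_len in (4, 32, 128, 256):
    tokens = torch.randint(0, 1000, (1, seq_len))
    out = compiled_model(tokens)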

            + +

            By supporting dynamic shapes in PyTorch 2.0’s Compiled mode, we can get the best of performance and ease of use.

            + +
            + + +
            + +

            The current work is evolving very rapidly and we may temporarily let some models regress as we land fundamental improvements to infrastructure. The latest updates for our progress on dynamic shapes can be found here.

            + +

            Distributed

            + +

            In summary, torch.distributed’s two main distributed wrappers work well in compiled mode.

            + +

            Both DistributedDataParallel (DDP) and FullyShardedDataParallel (FSDP) work in compiled mode and provide improved performance and memory utilization relative to eager mode, with some caveats and limitations.

            + +

            +

            Speedups in AMP Precision
            + +
Left: speedups for FSDP in Compiled mode over eager mode (AMP precision).
+Right: FSDP in Compiled mode uses substantially less memory than in eager mode.
            +

            + +
            + + +
            + +

            DistributedDataParallel (DDP)

            + +

DDP relies on overlapping AllReduce communications with backwards computation, and grouping smaller per-layer AllReduce operations into ‘buckets’ for greater efficiency. AOTAutograd functions compiled by TorchDynamo prevent communication overlap when combined naively with DDP, but performance is recovered by compiling separate subgraphs for each ‘bucket’ and allowing communication ops to happen outside and in between the subgraphs. DDP support in compiled mode also currently requires static_graph=False. See this post for more details on the approach and results for DDP + TorchDynamo.
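
A minimal sketch of combining DDP with torch.compile under these constraints; it assumes a torchrun-style launcher has set the usual environment variables, and the model is a stand-in.

import os
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

dist.init_process_group("nccl")                      # assumes torchrun set RANK/WORLD_SIZE
local_rank = int(os.environ.get("LOCAL_RANK", 0))
torch.cuda.set_device(local_rank)

model = torch.nn.Linear(1024, 1024).cuda()
ddp_model = DDP(model, device_ids=[local_rank], static_graph=False)  # per the note above
compiled_ddp_model = torch.compile(ddp_model)

out = compiled_ddp_model(torch.randn(8, 1024, device="cuda"))
out.sum().backward()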

            + +

            FullyShardedDataParallel (FSDP)

            + +

FSDP itself is a “beta” PyTorch feature and has a higher level of system complexity than DDP due to the ability to tune which submodules are wrapped and because there are generally more configuration options. FSDP works with TorchDynamo and TorchInductor for a variety of popular models, if configured with the use_orig_params=True flag. Some compatibility issues with particular models or configurations are expected at this time, but will be actively improved, and particular models can be prioritized if github issues are filed.

            + +

            Users specify an auto_wrap_policy argument to indicate which submodules of their model to wrap together in an FSDP instance used for state sharding, or manually wrap submodules in FSDP instances. For example, many transformer models work well when each ‘transformer block’ is wrapped in a separate FSDP instance and thus only the full state of one transformer block needs to be materialized at one time. Dynamo will insert graph breaks at the boundary of each FSDP instance, to allow communication ops in forward (and backward) to happen outside the graphs and in parallel to computation.
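
A sketch of that wrapping pattern; the Block class stands in for whatever ‘transformer block’ your model defines, and the distributed setup assumes a torchrun-style launcher.

import functools
import torch
import torch.distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy

class Block(torch.nn.Module):
    """Stand-in for a real transformer block class."""
    def __init__(self, dim=256):
        super().__init__()
        self.attn = torch.nn.MultiheadAttention(dim, num_heads=4, batch_first=True)
        self.mlp = torch.nn.Linear(dim, dim)
    def forward(self, x):
        return self.mlp(self.attn(x, x, x)[0])

dist.init_process_group("nccl")   # assumes torchrun set the usual env vars
torch.cuda.set_device(dist.get_rank() % torch.cuda.device_count())

model = torch.nn.Sequential(*[Block() for _ in range(4)]).cuda()

fsdp_model = FSDP(
    model,
    auto_wrap_policy=functools.partial(
        transformer_auto_wrap_policy, transformer_layer_cls={Block}
    ),
    use_orig_params=True,          # needed for compatibility with torch.compile
)
compiled_fsdp_model = torch.compile(fsdp_model)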

            + +

            If FSDP is used without wrapping submodules in separate instances, it falls back to operating similarly to DDP, but without bucketing. Hence all gradients are reduced in one operation, and there can be no compute/communication overlap even in Eager. This configuration has only been tested with TorchDynamo for functionality but not for performance.

            + +

            Developer/Vendor Experience

            + +

            With PyTorch 2.0, we want to simplify the backend (compiler) integration experience. To do this, we have focused on reducing the number of operators and simplifying the semantics of the operator set necessary to bring up a PyTorch backend.

            + +

            In graphical form, the PT2 stack looks like:

            + +

            + +

            + +

            Starting in the middle of the diagram, AOTAutograd dynamically captures autograd logic in an ahead-of-time fashion, producing a graph of forward and backwards operators in FX graph format.

            + +

            We provide a set of hardened decompositions (i.e. operator implementations written in terms of other operators) that can be leveraged to reduce the number of operators a backend is required to implement. We also simplify the semantics of PyTorch operators by selectively rewriting complicated PyTorch logic including mutations and views via a process called functionalization, as well as guaranteeing operator metadata information such as shape propagation formulas. This work is actively in progress; our goal is to provide a primitive and stable set of ~250 operators with simplified semantics, called PrimTorch, that vendors can leverage (i.e. opt-in to) in order to simplify their integrations.
            +After reducing and simplifying the operator set, backends may choose to integrate at the Dynamo (i.e. the middle layer, immediately after AOTAutograd) or Inductor (the lower layer). We describe some considerations in making this choice below, as well as future work around mixtures of backends.

            + +

            Dynamo Backend

            + +

            Vendors with existing compiler stacks may find it easiest to integrate as a TorchDynamo backend, receiving an FX Graph in terms of ATen/Prims IR. Note that for both training and inference, the integration point would be immediately after AOTAutograd, since we currently apply decompositions as part of AOTAutograd, and merely skip the backward-specific steps if targeting inference.

            + +

            Inductor backend

            + +

            Vendors can also integrate their backend directly into Inductor. Inductor takes in a graph produced by AOTAutograd that consists of ATen/Prim operations, and further lowers them down to a loop level IR. Today, Inductor provides lowerings to its loop-level IR for pointwise, reduction, scatter/gather and window operations. In addition, Inductor creates fusion groups, does indexing simplification, dimension collapsing, and tunes loop iteration order in order to support efficient code generation. Vendors can then integrate by providing the mapping from the loop level IR to hardware-specific code. Currently, Inductor has two backends: (1) C++ that generates multithreaded CPU code, (2) Triton that generates performant GPU code. These Inductor backends can be used as an inspiration for the alternate backends.

            + +

            Mixture of Backends Interface (coming soon)

            + +

            We have built utilities for partitioning an FX graph into subgraphs that contain operators supported by a backend and executing the remainder eagerly. These utilities can be extended to support a “mixture of backends,” configuring which portions of the graphs to run for which backend. However, there is not yet a stable interface or contract for backends to expose their operator support, preferences for patterns of operators, etc. This remains as ongoing work, and we welcome feedback from early adopters.

            + +

            Final Thoughts

            + +

We are super excited about the direction that we’ve taken for PyTorch 2.0 and beyond. The road to the final 2.0 release is going to be rough, but come join us on this journey early on. If you are interested in deep-diving further or contributing to the compiler, please continue reading below, which includes more information on how to get started (e.g., tutorials, benchmarks, models, FAQs) and on the Ask the Engineers: 2.0 Live Q&A Series starting this month. Additional resources include:

            + + + + + + + + + +

            Accelerating Hugging Face and TIMM models with PyTorch 2.0

            + +

            Author: Mark Saroufim

            + +

torch.compile() makes it easy to experiment with different compiler backends to make PyTorch code faster with a single line decorator torch.compile(). It works directly over an nn.Module as a drop-in replacement for torch.jit.script(), but without requiring you to make any source code changes. We expect this one line code change to provide you with between 30%-2x training time speedups on the vast majority of models that you’re already running.

            + +
            opt_module = torch.compile(module)
            +
            + +

            torch.compile supports arbitrary PyTorch code, control flow, mutation and comes with experimental support for dynamic shapes. We’re so excited about this development that we call it PyTorch 2.0.

            + +

What makes this announcement different for us is that we’ve already benchmarked some of the most popular open source PyTorch models and gotten substantial speedups ranging from 30% to 2x: https://github.com/pytorch/torchdynamo/issues/681.

            + +

There are no tricks here: we pip installed popular libraries like https://github.com/huggingface/transformers, https://github.com/huggingface/accelerate and https://github.com/rwightman/pytorch-image-models, ran torch.compile() on them, and that’s it.

            + +

            It’s rare to get both performance and convenience, but this is why the core team finds PyTorch 2.0 so exciting.

            + +

            Requirements

            + +

            For GPU (newer generation GPUs will see drastically better performance)

            + +
            pip3 install numpy --pre torch --force-reinstall --index-url https://download.pytorch.org/whl/nightly/cu117
            +
            + +

            For CPU

            + +
            pip3 install --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu
            +
            + +

            Optional: Verify Installation

            + +
            git clone https://github.com/pytorch/pytorch
+cd pytorch/tools/dynamo
            +python verify_dynamo.py
            +
            + +

            Optional: Docker installation

            + +

We also provide all the required dependencies in the PyTorch nightly binaries, which you can download with

            + +
            docker pull ghcr.io/pytorch/pytorch-nightly
            +
            + +

            And for ad hoc experiments just make sure that your container has access to all your GPUs

            + +
            docker run --gpus all -it ghcr.io/pytorch/pytorch-nightly:latest /bin/bash
            +
            + +

            Getting Started

            + +

            Please read Mark Saroufim’s full blog post where he walks you through a tutorial and real models for you to try PyTorch 2.0 today.

            + +

            Our goal with PyTorch was to build a breadth-first compiler that would speed up the vast majority of actual models people run in open source. The Hugging Face Hub ended up being an extremely valuable benchmarking tool for us, ensuring that any optimization we work on actually helps accelerate models people want to run.

            + +

The blog tutorial will show you exactly how to replicate those speedups, so you can be as excited about PyTorch 2.0 as we are. So please try out PyTorch 2.0, enjoy the free perf, and if you’re not seeing it then please open an issue and we will make sure your model is supported: https://github.com/pytorch/torchdynamo/issues

            + +

After all, we can’t claim we’ve created a breadth-first compiler unless YOUR models actually run faster.

            + +

            FAQs

            + +
              +
            1. +

              What is PT 2.0?
+2.0 is the latest PyTorch version. PyTorch 2.0 offers the same eager-mode development experience, while adding a compiled mode via torch.compile. This compiled mode has the potential to speed up your models during training and inference.

              +
            2. +
            3. +

              Why 2.0 instead of 1.14?
              +PyTorch 2.0 is what 1.14 would have been. We were releasing substantial new features that we believe change how you meaningfully use PyTorch, so we are calling it 2.0 instead.

              +
            4. +
            5. +

              How do I install 2.0? Any additional requirements?

              + +

              Install the latest nightlies:

              + +

              CUDA 11.8

              +
               pip3 install numpy --pre torch torchvision torchaudio --force-reinstall --index-url https://download.pytorch.org/whl/nightly/cu118
              +
              +

              CUDA 11.7

              +
               pip3 install numpy --pre torch torchvision torchaudio --force-reinstall --index-url https://download.pytorch.org/whl/nightly/cu117
              +
              +

              CPU

              +
               pip3 install numpy --pre torch torchvision torchaudio --force-reinstall --index-url https://download.pytorch.org/whl/nightly/cpu
              +
              +
            6. +
            7. +

              Is 2.0 code backwards-compatible with 1.X?
              +Yes, using 2.0 will not require you to modify your PyTorch workflows. A single line of code model = torch.compile(model) can optimize your model to use the 2.0 stack, and smoothly run with the rest of your PyTorch code. This is completely opt-in, and you are not required to use the new compiler.

              +
            8. +
            9. +

              Is 2.0 enabled by default?
              +2.0 is the name of the release. torch.compile is the feature released in 2.0, and you need to explicitly use torch.compile.

              +
            10. +
            11. How do I migrate my PT1.X code to PT2.0?
              +Your code should be working as-is without the need for any migrations. If you want to use the new Compiled mode feature introduced in 2.0, then you can start by optimizing your model with one line: model = torch.compile(model).
              +While the speedups are primarily observed during training, you can also use it for inference if your model runs faster than eager mode. +
               import torch
              +      
              + def train(model, dataloader):
              +   model = torch.compile(model)
              +   for batch in dataloader:
              +     run_epoch(model, batch)
              +
              + def infer(model, input):
              +   model = torch.compile(model)
              +   return model(\*\*input)
              +
              +
            12. +
            13. +

              Why should I use PT2.0 instead of PT 1.X?
              +See answer to Question (2).

              +
            14. +
            15. What is my code doing differently when running PyTorch 2.0?
              +Out of the box, PyTorch 2.0 is the same as PyTorch 1.x, your models run in eager-mode i.e. every line of Python is executed one after the other.
              +In 2.0, if you wrap your model in model = torch.compile(model), your model goes through 3 steps before execution: +
                +
              1. Graph acquisition: first the model is rewritten as blocks of subgraphs. Subgraphs which can be compiled by TorchDynamo are “flattened” and the other subgraphs (which might contain control-flow code or other unsupported Python constructs) will fall back to Eager-Mode.
              2. +
              3. Graph lowering: all the PyTorch operations are decomposed into their constituent kernels specific to the chosen backend.
              4. +
              5. Graph compilation, where the kernels call their corresponding low-level device-specific operations.
              6. +
              +
            16. +
            17. What new components does PT2.0 add to PT? +
                +
              • TorchDynamo generates FX Graphs from Python bytecode. It maintains the eager-mode capabilities using guards to ensure the generated graphs are valid (read more)
              • +
              • AOTAutograd to generate the backward graph corresponding to the forward graph captured by TorchDynamo (read more).
              • +
              • PrimTorch to decompose complicated PyTorch operations into simpler and more elementary ops (read more).
              • +
              • [Backend] Backends integrate with TorchDynamo to compile the graph into IR that can run on accelerators. For example, TorchInductor compiles the graph to either Triton for GPU execution or OpenMP for CPU execution (read more).
              • +
              +
            18. +
            19. +

              What compiler backends does 2.0 currently support?
              +The default and the most complete backend is TorchInductor, but TorchDynamo has a growing list of backends that can be found by calling torchdynamo.list_backends().

              +
            20. +
            21. +

              How does distributed training work with 2.0?
              +DDP and FSDP in Compiled mode can run up to 15% faster than Eager-Mode in FP32 and up to 80% faster in AMP precision. PT2.0 does some extra optimization to ensure DDP’s communication-computation overlap works well with Dynamo’s partial graph creation. Ensure you run DDP with static_graph=False. More details here.

              +
            22. +
            23. +

              How can I learn more about PT2.0 developments?
              +The PyTorch Developers forum is the best place to learn about 2.0 components directly from the developers who build them.

              +
            24. +
            25. +

              Help my code is running slower with 2.0’s Compiled Mode!
              +The most likely reason for performance hits is too many graph breaks. For instance, something innocuous as a print statement in your model’s forward triggers a graph break. We have ways to diagnose these - read more here.

              +
            26. +
            27. My previously-running code is crashing with 2.0’s Compiled Mode! How do I debug it?
              +Here are some techniques to triage where your code might be failing, and printing helpful logs: https://pytorch.org/docs/stable/torch.compiler_faq.html#why-is-my-code-crashing.
            28. +
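As a concrete illustration of the backend question above: the FAQ refers to the standalone torchdynamo package, while the 2.0 nightlies bundle the same functionality as torch._dynamo. A minimal sketch, assuming a 2.0 nightly as installed earlier:

import torch
import torch._dynamo as dynamo  # bundled form of TorchDynamo in the 2.0 nightlies

# See which compiler backends your install exposes (the exact list varies by build).
print(dynamo.list_backends())

# TorchInductor is the default; you can also request a backend explicitly.
model = torch.nn.Sequential(torch.nn.Linear(16, 32), torch.nn.ReLU(), torch.nn.Linear(32, 4))
compiled = torch.compile(model, backend="inductor")
print(compiled(torch.randn(8, 16)).shape)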
            + +

            Ask the Engineers: 2.0 Live Q&A Series

            + +

We will be hosting a series of live Q&A sessions for the community to ask deeper questions and have a dialogue with the experts. Please check back to see the full calendar of topics throughout the year. If you are unable to attend: 1) they will be recorded for future viewing, and 2) you can attend our Dev Infra Office Hours every Friday at 10 AM PST @ https://github.com/pytorch/pytorch/wiki/Dev-Infra-Office-Hours.

            + +

            Please click here to see dates, times, descriptions and links.

            + +

Disclaimer: Please do not share your personal information, last name, or company when joining the live sessions and submitting questions.

TOPIC | HOST
The new developer experience of using 2.0 (install, setup, clone an example, run with 2.0) | Suraj Subramanian
PT2 Profiling and Debugging | Bert Maher
A deep dive on TorchInductor and PT 2.0 Backend Integration | Natalia Gimelshein, Bin Bao and Sherlock Huang
Extend PyTorch without C++ and functorch: JAX-like composable function transforms for PyTorch | Anjali Chourdia and Samantha Andow
A deep dive on TorchDynamo | Michael Voznesensky
Rethinking data loading with TorchData: Datapipes and Dataloader2 | Kevin Tse
Composable training (+ torcheval, torchsnapshot) | Ananth Subramaniam
How and why contribute code and tutorials to PyTorch | Zain Rizvi, Svetlana Karslioglu and Carl Parker
Dynamic Shapes and Calculating Maximum Batch Size | Edward Yang and Elias Ellison
PyTorch 2.0 Export: Sound Whole Graph Capture for PyTorch | Michael Suo and Yanan Cao
2-D Parallelism using DistributedTensor and PyTorch DistributedTensor | Wanchao Liang and Alisson Gusatti Azzolini
TorchRec and FSDP in Production | Dennis van der Staay, Andrew Gu and Rohan Varma
The Future of PyTorch On-Device | Raziel Alvarez Guevara
TorchMultiModal (Intro Blog, Scaling Blog) | Kartikay Khandelwal
BetterTransformers (+ integration with Hugging Face), Model Serving and Optimizations (Blog 1, Github) | Hamid Shojanazeri and Mark Saroufim
PT2 and Distributed | Will Constable
            + +

            Watch the Talks from PyTorch Conference


            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + diff --git a/_get_started/installation/aws.md b/get_started/installation/aws.md similarity index 100% rename from _get_started/installation/aws.md rename to get_started/installation/aws.md diff --git a/_get_started/installation/azure.md b/get_started/installation/azure.md similarity index 100% rename from _get_started/installation/azure.md rename to get_started/installation/azure.md diff --git a/_get_started/installation/google-cloud.md b/get_started/installation/google-cloud.md similarity index 100% rename from _get_started/installation/google-cloud.md rename to get_started/installation/google-cloud.md diff --git a/_get_started/installation/lightning-studios.md b/get_started/installation/lightning-studios.md similarity index 100% rename from _get_started/installation/lightning-studios.md rename to get_started/installation/lightning-studios.md diff --git a/_get_started/installation/linux.md b/get_started/installation/linux.md similarity index 100% rename from _get_started/installation/linux.md rename to get_started/installation/linux.md diff --git a/_get_started/installation/mac.md b/get_started/installation/mac.md similarity index 100% rename from _get_started/installation/mac.md rename to get_started/installation/mac.md diff --git a/_get_started/installation/windows.md b/get_started/installation/windows.md similarity index 100% rename from _get_started/installation/windows.md rename to get_started/installation/windows.md diff --git a/github-stars.json b/github-stars.json index 0e218ba09be4..5979d207704a 100644 --- a/github-stars.json +++ b/github-stars.json @@ -1,13 +1,265 @@ ---- ---- - { "data": [ - {% for item in site.hub %} + { - "id": "{{ item.github-id }}" + "id": "datvuthanh/HybridNets" } - {% if forloop.last != true %},{% endif %} - {% endfor %} + , + + { + "id": "facebookresearch/WSL-Images" + } + , + + { + "id": "facebookresearch/pytorch_GAN_zoo" + } + , + + { + "id": "facebookresearch/pytorch_GAN_zoo" + } + , + + { + "id": "facebookresearch/pytorchvideo" + } + , + + { + "id": "facebookresearch/pytorchvideo" + } + , + + { + "id": "facebookresearch/pytorchvideo" + } + , + + { + "id": "facebookresearch/semi-supervised-ImageNet1K-models" + } + , + + { + "id": "huggingface/transformers" + } + , + + { + "id": "hustvl/YOLOP" + } + , + + { + "id": "intel-isl/MiDaS" + } + , + + { + "id": "mateuszbuda/brain-segmentation-pytorch" + } + , + + { + "id": "nicolalandro/ntsnet-cub200" + } + , + + { + "id": "NVIDIA/DeepLearningExamples" + } + , + + { + "id": "NVIDIA/DeepLearningExamples" + } + , + + { + "id": "NVIDIA/DeepLearningExamples" + } + , + + { + "id": "NVIDIA/DeepLearningExamples" + } + , + + { + "id": "NVIDIA/DeepLearningExamples" + } + , + + { + "id": "NVIDIA/DeepLearningExamples" + } + , + + { + "id": "NVIDIA/DeepLearningExamples" + } + , + + { + "id": "NVIDIA/DeepLearningExamples" + } + , + + { + "id": "NVIDIA/DeepLearningExamples" + } + , + + { + "id": "NVIDIA/DeepLearningExamples" + } + , + + { + "id": "pytorch/fairseq" + } + , + + { + "id": "pytorch/fairseq" + } + , + + { + "id": "pytorch/vision" + } + , + + { + "id": "pytorch/vision" + } + , + + { + "id": "pytorch/vision" + } + , + + { + "id": "pytorch/vision" + } + , + + { + "id": "huawei-noah/ghostnet" + } + , + + { + "id": "pytorch/vision" + } + , + + { + "id": "PingoLH/Pytorch-HarDNet" + } + , + + { + "id": "XingangPan/IBN-Net" + } + , + + { + "id": "pytorch/vision" + } + , + + { + "id": "szq0214/MEAL-V2" + } + , + + { + "id": "pytorch/vision" + } + , + + { + "id": 
"mit-han-lab/once-for-all" + } + , + + { + "id": "mit-han-lab/ProxylessNAS" + } + , + + { + "id": "zhanghang1989/ResNeSt" + } + , + + { + "id": "pytorch/vision" + } + , + + { + "id": "pytorch/vision" + } + , + + { + "id": "pytorch/vision" + } + , + + { + "id": "huawei-noah/Efficient-AI-Backbones" + } + , + + { + "id": "pytorch/vision" + } + , + + { + "id": "pytorch/vision" + } + , + + { + "id": "pytorch/vision" + } + , + + { + "id": "sigsep/open-unmix-pytorch" + } + , + + { + "id": "coderx7/simplenet_pytorch" + } + , + + { + "id": "snakers4/silero-models" + } + , + + { + "id": "snakers4/silero-models" + } + , + + { + "id": "snakers4/silero-vad" + } + , + + { + "id": "ultralytics/yolov5" + } + + ] } diff --git a/hub/datvuthanh_hybridnets/index.html b/hub/datvuthanh_hybridnets/index.html new file mode 100644 index 000000000000..d256069dcc95 --- /dev/null +++ b/hub/datvuthanh_hybridnets/index.html @@ -0,0 +1,502 @@ + + + + + + + + + + + + + HybridNets | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + HybridNets +

            + +
            +
            +

            By Dat Vu Thanh

            +
            + +
            +

            HybridNets - End2End Perception Network

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Before You Start

            + +

            Start from a Python>=3.7 environment with PyTorch>=1.10 installed. To install PyTorch see https://pytorch.org/get-started/locally/. To install HybridNets dependencies:

            +
            pip install -qr https://raw.githubusercontent.com/datvuthanh/HybridNets/main/requirements.txt  # install dependencies
            +
            + +

            Model Description

            + +

            + +

HybridNets is an end-to-end perception network for multiple tasks. Our work focuses on traffic object detection, drivable area segmentation, and lane detection. HybridNets can run in real time on embedded systems and obtains state-of-the-art results for object detection and lane detection on the BDD100K dataset.

            + +

            Results

            + +

            Traffic Object Detection

            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            ModelRecall (%)mAP@0.5 (%)
            MultiNet81.360.2
            DLT-Net89.468.4
            Faster R-CNN77.255.6
            YOLOv5s86.877.2
            YOLOP89.276.5
            HybridNets92.877.3
            + +

            + +

            Drivable Area Segmentation

            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            ModelDrivable mIoU (%)
            MultiNet71.6
            DLT-Net71.3
            PSPNet89.6
            YOLOP91.5
            HybridNets90.5
            + +

            + +

            Lane Line Detection

            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            ModelAccuracy (%)Lane Line IoU (%)
            Enet34.1214.64
            SCNN35.7915.84
            Enet-SAD36.5616.02
            YOLOP70.526.2
            HybridNets85.431.6
            + +

            + +

            + +

            Load From PyTorch Hub

            + +

            This example loads the pretrained HybridNets model and passes an image for inference.

            +
            import torch
            +
            +# load model
            +model = torch.hub.load('datvuthanh/hybridnets', 'hybridnets', pretrained=True)
            +
+# inference on a dummy input of shape (batch, channels, height, width)
+img = torch.randn(1, 3, 640, 384)
            +features, regression, classification, anchors, segmentation = model(img)
            +
            + +
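If you want to try the model on a real photo instead of random noise, a small sketch like the following may help. Here road.jpg is a hypothetical local file, the resize matches the (1, 3, 640, 384) dummy input above, and decoding the detection/segmentation outputs follows the utilities in the HybridNets repository.

import torch
from PIL import Image
from torchvision import transforms

model = torch.hub.load('datvuthanh/hybridnets', 'hybridnets', pretrained=True)
model.eval()

# road.jpg is a placeholder; use any RGB driving-scene image.
img = Image.open('road.jpg').convert('RGB')
preprocess = transforms.Compose([
    transforms.Resize((640, 384)),  # (H, W) matching the dummy input shape above
    transforms.ToTensor(),
])
batch = preprocess(img).unsqueeze(0)

with torch.no_grad():
    features, regression, classification, anchors, segmentation = model(batch)

print(segmentation.shape)  # raw segmentation logits; post-processing lives in the HybridNets repo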

            Citation

            + +

            If you find our paper and code useful for your research, please consider giving a star and citation:

            + +
            @misc{vu2022hybridnets,
            +      title={HybridNets: End-to-End Perception Network}, 
            +      author={Dat Vu and Bao Ngo and Hung Phan},
            +      year={2022},
            +      eprint={2203.09035},
            +      archivePrefix={arXiv},
            +      primaryClass={cs.CV}
            +}
            +
            + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/facebookresearch_WSL-Images_resnext/index.html b/hub/facebookresearch_WSL-Images_resnext/index.html new file mode 100644 index 000000000000..89a436c4cbce --- /dev/null +++ b/hub/facebookresearch_WSL-Images_resnext/index.html @@ -0,0 +1,471 @@ + + + + + + + + + + + + + ResNext WSL | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + ResNext WSL +

            + +
            +
            +

            By Facebook AI

            +
            + +
            +

            ResNext models trained with billion scale weakly-supervised data.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x8d_wsl')
            +# or
            +# model = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x16d_wsl')
            +# or
            +# model = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x32d_wsl')
            +# or
            +#model = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x48d_wsl')
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +print(torch.nn.functional.softmax(output[0], dim=0))
            +
            +
            + +
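To turn those probabilities into readable predictions, one common pattern is to map the top-5 indices onto the ImageNet class list published in the pytorch/hub repository. A short sketch building on the output tensor computed above:

import urllib.request
import torch

# Class names for the 1000 ImageNet categories, as published in the pytorch/hub repo.
classes_url = "https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt"
urllib.request.urlretrieve(classes_url, "imagenet_classes.txt")
with open("imagenet_classes.txt") as f:
    categories = [line.strip() for line in f]

probabilities = torch.nn.functional.softmax(output[0], dim=0)
top5_prob, top5_idx = torch.topk(probabilities, 5)
for p, i in zip(top5_prob, top5_idx):
    print(categories[int(i)], float(p))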

            Model Description

            +

The provided ResNeXt models are pre-trained in a weakly-supervised fashion on 940 million public images with 1.5K hashtags matching 1000 ImageNet1K synsets, followed by fine-tuning on the ImageNet1K dataset. Please refer to “Exploring the Limits of Weakly Supervised Pretraining” (https://arxiv.org/abs/1805.00932), presented at ECCV 2018, for details of the model training.

            + +

            We are providing 4 models with different capacities.

            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            Model#ParametersFLOPSTop-1 Acc.Top-5 Acc.
            ResNeXt-101 32x8d88M16B82.296.4
            ResNeXt-101 32x16d193M36B84.297.2
            ResNeXt-101 32x32d466M87B85.197.5
            ResNeXt-101 32x48d829M153B85.497.6
            + +

            Our models significantly improve the training accuracy on ImageNet compared to training from scratch. We achieve state-of-the-art accuracy of 85.4% on ImageNet with our ResNext-101 32x48d model.

            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/facebookresearch_pytorch-gan-zoo_dcgan/index.html b/hub/facebookresearch_pytorch-gan-zoo_dcgan/index.html new file mode 100644 index 000000000000..21ff6559a922 --- /dev/null +++ b/hub/facebookresearch_pytorch-gan-zoo_dcgan/index.html @@ -0,0 +1,387 @@ + + + + + + + + + + + + + DCGAN on FashionGen | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + DCGAN on FashionGen +

            + +
            +
            +

            By FAIR HDGAN

            +
            + +
            +

            A simple generative image model for 64x64 images

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +use_gpu = True if torch.cuda.is_available() else False
            +
            +model = torch.hub.load('facebookresearch/pytorch_GAN_zoo:hub', 'DCGAN', pretrained=True, useGPU=use_gpu)
            +
            + +

The input to the model is a noise vector of shape (N, 120) where N is the number of images to be generated. It can be constructed using the function .buildNoiseData. The model has a .test function that takes in the noise vector and generates images.

            + +
            num_images = 64
            +noise, _ = model.buildNoiseData(num_images)
            +with torch.no_grad():
            +    generated_images = model.test(noise)
            +
            +# let's plot these images using torchvision and matplotlib
            +import matplotlib.pyplot as plt
            +import torchvision
            +plt.imshow(torchvision.utils.make_grid(generated_images).permute(1, 2, 0).cpu().numpy())
            +# plt.show()
            +
            + +
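If you are running without a display (so matplotlib cannot show the plot), you can write the grid straight to disk instead. A minimal sketch using the same generated_images tensor:

import torchvision

# Save the generated samples as a single image grid file.
torchvision.utils.save_image(generated_images, 'dcgan_fashiongen_samples.png', nrow=8, normalize=True)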

            You should see an image similar to the one on the left.

            + +

            If you want to train your own DCGAN and other GANs from scratch, have a look at PyTorch GAN Zoo.

            + +

            Model Description

            + +

In computer vision, generative models are networks trained to create images from a given input. In our case, we consider a specific kind of generative network: GANs (Generative Adversarial Networks), which learn to map a random vector to a realistic image.

            + +

DCGAN is a model designed in 2015 by Radford et al. in the paper Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks. It is a GAN architecture that is both very simple and efficient for low-resolution image generation (up to 64x64).

            + +

            Requirements

            + +
              +
            • Currently only supports Python 3
            • +
            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/facebookresearch_pytorch-gan-zoo_pgan/index.html b/hub/facebookresearch_pytorch-gan-zoo_pgan/index.html new file mode 100644 index 000000000000..1b38d86e9538 --- /dev/null +++ b/hub/facebookresearch_pytorch-gan-zoo_pgan/index.html @@ -0,0 +1,394 @@ + + + + + + + + + + + + + Progressive Growing of GANs (PGAN) | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + Progressive Growing of GANs (PGAN) +

            + +
            +
            +

            By FAIR HDGAN

            +
            + +
            +

            High-quality image generation of fashion, celebrity faces

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +use_gpu = True if torch.cuda.is_available() else False
            +
            +# trained on high-quality celebrity faces "celebA" dataset
            +# this model outputs 512 x 512 pixel images
            +model = torch.hub.load('facebookresearch/pytorch_GAN_zoo:hub',
            +                       'PGAN', model_name='celebAHQ-512',
            +                       pretrained=True, useGPU=use_gpu)
            +# this model outputs 256 x 256 pixel images
            +# model = torch.hub.load('facebookresearch/pytorch_GAN_zoo:hub',
            +#                        'PGAN', model_name='celebAHQ-256',
            +#                        pretrained=True, useGPU=use_gpu)
            +
            + +

The input to the model is a noise vector of shape (N, 512) where N is the number of images to be generated. It can be constructed using the function .buildNoiseData. The model has a .test function that takes in the noise vector and generates images.

            + +
            num_images = 4
            +noise, _ = model.buildNoiseData(num_images)
            +with torch.no_grad():
            +    generated_images = model.test(noise)
            +
            +# let's plot these images using torchvision and matplotlib
            +import matplotlib.pyplot as plt
            +import torchvision
            +grid = torchvision.utils.make_grid(generated_images.clamp(min=-1, max=1), scale_each=True, normalize=True)
            +plt.imshow(grid.permute(1, 2, 0).cpu().numpy())
            +# plt.show()
            +
            + +
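Because the generator is a smooth map from the latent space to images, interpolating between two noise vectors produces a morph between two faces. A small sketch building on the .buildNoiseData / .test API shown above:

import torch
import torchvision
import matplotlib.pyplot as plt

steps = 8
endpoints, _ = model.buildNoiseData(2)              # two random latent vectors
alphas = torch.linspace(0, 1, steps).view(-1, 1)
latents = (1 - alphas) * endpoints[0] + alphas * endpoints[1]

with torch.no_grad():
    frames = model.test(latents)

grid = torchvision.utils.make_grid(frames.clamp(min=-1, max=1), nrow=steps, normalize=True, scale_each=True)
plt.imshow(grid.permute(1, 2, 0).cpu().numpy())
# plt.show()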

            You should see an image similar to the one on the left.

            + +

            If you want to train your own Progressive GAN and other GANs from scratch, have a look at PyTorch GAN Zoo.

            + +

            Model Description

            + +

In computer vision, generative models are networks trained to create images from a given input. In our case, we consider a specific kind of generative network: GANs (Generative Adversarial Networks), which learn to map a random vector to a realistic image.

            + +

Progressive Growing of GANs is a method developed by Karras et al. [1] in 2017 allowing the generation of high-resolution images. To do so, the generative network is trained progressively: at first the model is trained to build very low resolution images; once it converges, new layers are added and the output resolution doubles. The process continues until the desired resolution is reached.

            + +

            Requirements

            + +
              +
            • Currently only supports Python 3
            • +
            + +

            References

            + +

            [1] Tero Karras et al, “Progressive Growing of GANs for Improved Quality, Stability, and Variation” https://arxiv.org/abs/1710.10196

            + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/facebookresearch_pytorchvideo_resnet/index.html b/hub/facebookresearch_pytorchvideo_resnet/index.html new file mode 100644 index 000000000000..e8e5f9ae7ef2 --- /dev/null +++ b/hub/facebookresearch_pytorchvideo_resnet/index.html @@ -0,0 +1,502 @@ + + + + + + + + + + + + + 3D ResNet | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + 3D ResNet +

            + +
            +
            +

            By FAIR PyTorchVideo

            +
            + +
            +

ResNet-style video classification networks pretrained on the Kinetics 400 dataset

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Example Usage

            + +

            Imports

            + +

            Load the model:

            + +
            import torch
            +# Choose the `slow_r50` model 
            +model = torch.hub.load('facebookresearch/pytorchvideo', 'slow_r50', pretrained=True)
            +
            + +

            Import remaining functions:

            + +
            import json
            +import urllib
            +from pytorchvideo.data.encoded_video import EncodedVideo
            +
            +from torchvision.transforms import Compose, Lambda
            +from torchvision.transforms._transforms_video import (
            +    CenterCropVideo,
            +    NormalizeVideo,
            +)
            +from pytorchvideo.transforms import (
            +    ApplyTransformToKey,
            +    ShortSideScale,
            +    UniformTemporalSubsample
            +)
            +
            + +

            Setup

            + +

            Set the model to eval mode and move to desired device.

            + +
            # Set to GPU or CPU
            +device = "cpu"
            +model = model.eval()
            +model = model.to(device)
            +
            + +

            Download the id to label mapping for the Kinetics 400 dataset on which the torch hub models were trained. This will be used to get the category label names from the predicted class ids.

            + +
            json_url = "https://dl.fbaipublicfiles.com/pyslowfast/dataset/class_names/kinetics_classnames.json"
            +json_filename = "kinetics_classnames.json"
            +try: urllib.URLopener().retrieve(json_url, json_filename)
            +except: urllib.request.urlretrieve(json_url, json_filename)
            +
            + +
            with open(json_filename, "r") as f:
            +    kinetics_classnames = json.load(f)
            +
            +# Create an id to label name mapping
            +kinetics_id_to_classname = {}
            +for k, v in kinetics_classnames.items():
            +    kinetics_id_to_classname[v] = str(k).replace('"', "")
            +
            + +

            Define input transform

            + +
            side_size = 256
            +mean = [0.45, 0.45, 0.45]
            +std = [0.225, 0.225, 0.225]
            +crop_size = 256
            +num_frames = 8
            +sampling_rate = 8
            +frames_per_second = 30
            +
            +# Note that this transform is specific to the slow_R50 model.
            +transform =  ApplyTransformToKey(
            +    key="video",
            +    transform=Compose(
            +        [
            +            UniformTemporalSubsample(num_frames),
            +            Lambda(lambda x: x/255.0),
            +            NormalizeVideo(mean, std),
            +            ShortSideScale(
            +                size=side_size
            +            ),
            +            CenterCropVideo(crop_size=(crop_size, crop_size))
            +        ]
            +    ),
            +)
            +
            +# The duration of the input clip is also specific to the model.
            +clip_duration = (num_frames * sampling_rate)/frames_per_second
            +
            + +
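With the slow_r50 defaults above (8 frames, a sampling rate of 8, and 30 fps video), the clip duration works out to roughly 2.13 seconds, as the short check below shows:

num_frames = 8
sampling_rate = 8
frames_per_second = 30

# 8 frames spaced 8 frames apart cover 64 source frames, i.e. about 2.13 s of 30 fps video.
clip_duration = (num_frames * sampling_rate) / frames_per_second
print(clip_duration)  # 2.1333...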

            Run Inference

            + +

            Download an example video.

            + +
            url_link = "https://dl.fbaipublicfiles.com/pytorchvideo/projects/archery.mp4"
            +video_path = 'archery.mp4'
            +try: urllib.URLopener().retrieve(url_link, video_path)
            +except: urllib.request.urlretrieve(url_link, video_path)
            +
            + +

            Load the video and transform it to the input format required by the model.

            + +
            # Select the duration of the clip to load by specifying the start and end duration
            +# The start_sec should correspond to where the action occurs in the video
            +start_sec = 0
            +end_sec = start_sec + clip_duration
            +
            +# Initialize an EncodedVideo helper class and load the video
            +video = EncodedVideo.from_path(video_path)
            +
            +# Load the desired clip
            +video_data = video.get_clip(start_sec=start_sec, end_sec=end_sec)
            +
            +# Apply a transform to normalize the video input
            +video_data = transform(video_data)
            +
            +# Move the inputs to the desired device
            +inputs = video_data["video"]
            +inputs = inputs.to(device)
            +
            + +

            Get Predictions

            + +
            # Pass the input clip through the model
            +preds = model(inputs[None, ...])
            +
            +# Get the predicted classes
            +post_act = torch.nn.Softmax(dim=1)
            +preds = post_act(preds)
            +pred_classes = preds.topk(k=5).indices[0]
            +
            +# Map the predicted classes to the label names
            +pred_class_names = [kinetics_id_to_classname[int(i)] for i in pred_classes]
            +print("Top 5 predicted labels: %s" % ", ".join(pred_class_names))
            +
            + +

            Model Description

            +

The model architecture is based on [1] with pretrained weights using the 8x8 setting on the Kinetics dataset.

            + + + + + + + + + + + + + + + + + + + + + + + + +
            archdepthframe length x sample ratetop 1top 5Flops (G)Params (M)
            SlowR508x874.5891.6354.5232.45
            + +

            References

            +

[1] Christoph Feichtenhofer et al., “SlowFast Networks for Video Recognition”, https://arxiv.org/pdf/1812.03982.pdf

            + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/facebookresearch_pytorchvideo_slowfast/index.html b/hub/facebookresearch_pytorchvideo_slowfast/index.html new file mode 100644 index 000000000000..88610adc314d --- /dev/null +++ b/hub/facebookresearch_pytorchvideo_slowfast/index.html @@ -0,0 +1,535 @@ + + + + + + + + + + + + + SlowFast | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + SlowFast +

            + +
            +
            +

            By FAIR PyTorchVideo

            +
            + +
            +

            SlowFast networks pretrained on the Kinetics 400 dataset

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Example Usage

            + +

            Imports

            + +

            Load the model:

            + +
            import torch
            +# Choose the `slowfast_r50` model 
            +model = torch.hub.load('facebookresearch/pytorchvideo', 'slowfast_r50', pretrained=True)
            +
            + +

            Import remaining functions:

            + +
            from typing import Dict
            +import json
            +import urllib
            +from torchvision.transforms import Compose, Lambda
            +from torchvision.transforms._transforms_video import (
            +    CenterCropVideo,
            +    NormalizeVideo,
            +)
            +from pytorchvideo.data.encoded_video import EncodedVideo
            +from pytorchvideo.transforms import (
            +    ApplyTransformToKey,
            +    ShortSideScale,
            +    UniformTemporalSubsample,
            +    UniformCropVideo
            +) 
            +
            + +

            Setup

            + +

            Set the model to eval mode and move to desired device.

            + +
            # Set to GPU or CPU
            +device = "cpu"
            +model = model.eval()
            +model = model.to(device)
            +
            + +

            Download the id to label mapping for the Kinetics 400 dataset on which the torch hub models were trained. This will be used to get the category label names from the predicted class ids.

            + +
            json_url = "https://dl.fbaipublicfiles.com/pyslowfast/dataset/class_names/kinetics_classnames.json"
            +json_filename = "kinetics_classnames.json"
            +try: urllib.URLopener().retrieve(json_url, json_filename)
            +except: urllib.request.urlretrieve(json_url, json_filename)
            +
            + +
            with open(json_filename, "r") as f:
            +    kinetics_classnames = json.load(f)
            +
            +# Create an id to label name mapping
            +kinetics_id_to_classname = {}
            +for k, v in kinetics_classnames.items():
            +    kinetics_id_to_classname[v] = str(k).replace('"', "")
            +
            + +

            Define input transform

            + +
            side_size = 256
            +mean = [0.45, 0.45, 0.45]
            +std = [0.225, 0.225, 0.225]
            +crop_size = 256
            +num_frames = 32
            +sampling_rate = 2
            +frames_per_second = 30
            +slowfast_alpha = 4
            +num_clips = 10
            +num_crops = 3
            +
            +class PackPathway(torch.nn.Module):
            +    """
+    Transform for converting a video clip into a list of [slow_pathway, fast_pathway] tensors.
            +    """
            +    def __init__(self):
            +        super().__init__()
            +        
            +    def forward(self, frames: torch.Tensor):
            +        fast_pathway = frames
            +        # Perform temporal sampling from the fast pathway.
            +        slow_pathway = torch.index_select(
            +            frames,
            +            1,
            +            torch.linspace(
            +                0, frames.shape[1] - 1, frames.shape[1] // slowfast_alpha
            +            ).long(),
            +        )
            +        frame_list = [slow_pathway, fast_pathway]
            +        return frame_list
            +
            +transform =  ApplyTransformToKey(
            +    key="video",
            +    transform=Compose(
            +        [
            +            UniformTemporalSubsample(num_frames),
            +            Lambda(lambda x: x/255.0),
            +            NormalizeVideo(mean, std),
            +            ShortSideScale(
            +                size=side_size
            +            ),
            +            CenterCropVideo(crop_size),
            +            PackPathway()
            +        ]
            +    ),
            +)
            +
            +# The duration of the input clip is also specific to the model.
            +clip_duration = (num_frames * sampling_rate)/frames_per_second
            +
            + +
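To see what PackPathway produces, a quick shape check helps; this is a sketch assuming the default values above (32 frames and slowfast_alpha = 4):

import torch

# A dummy clip shaped (C, T, H, W), as it looks after the transforms above.
dummy_clip = torch.randn(3, 32, 256, 256)
slow_pathway, fast_pathway = PackPathway()(dummy_clip)

print(slow_pathway.shape)  # torch.Size([3, 8, 256, 256])  -> every 4th frame
print(fast_pathway.shape)  # torch.Size([3, 32, 256, 256]) -> all frames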

            Run Inference

            + +

            Download an example video.

            + +
            url_link = "https://dl.fbaipublicfiles.com/pytorchvideo/projects/archery.mp4"
            +video_path = 'archery.mp4'
            +try: urllib.URLopener().retrieve(url_link, video_path)
            +except: urllib.request.urlretrieve(url_link, video_path)
            +
            + +

            Load the video and transform it to the input format required by the model.

            + +
            # Select the duration of the clip to load by specifying the start and end duration
            +# The start_sec should correspond to where the action occurs in the video
            +start_sec = 0
            +end_sec = start_sec + clip_duration
            +
            +# Initialize an EncodedVideo helper class and load the video
            +video = EncodedVideo.from_path(video_path)
            +
            +# Load the desired clip
            +video_data = video.get_clip(start_sec=start_sec, end_sec=end_sec)
            +
            +# Apply a transform to normalize the video input
            +video_data = transform(video_data)
            +
            +# Move the inputs to the desired device
            +inputs = video_data["video"]
            +inputs = [i.to(device)[None, ...] for i in inputs]
            +
            + +

            Get Predictions

            + +
            # Pass the input clip through the model
            +preds = model(inputs)
            +
            +# Get the predicted classes
            +post_act = torch.nn.Softmax(dim=1)
            +preds = post_act(preds)
            +pred_classes = preds.topk(k=5).indices[0]
            +
            +# Map the predicted classes to the label names
            +pred_class_names = [kinetics_id_to_classname[int(i)] for i in pred_classes]
            +print("Top 5 predicted labels: %s" % ", ".join(pred_class_names))
            +
            + +

            Model Description

            +

SlowFast model architectures are based on [1] with pretrained weights using the 8x8 setting on the Kinetics dataset.

            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            archdepthframe length x sample ratetop 1top 5Flops (G)Params (M)
            SlowFastR508x876.9492.6965.7134.57
            SlowFastR1018x877.9093.27127.2062.83
            + +

            References

            +

[1] Christoph Feichtenhofer et al., “SlowFast Networks for Video Recognition”, https://arxiv.org/pdf/1812.03982.pdf

            + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/facebookresearch_pytorchvideo_x3d/index.html b/hub/facebookresearch_pytorchvideo_x3d/index.html new file mode 100644 index 000000000000..2048a984c05f --- /dev/null +++ b/hub/facebookresearch_pytorchvideo_x3d/index.html @@ -0,0 +1,539 @@ + + + + + + + + + + + + + X3D | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + X3D +

            + +
            +
            +

            By FAIR PyTorchVideo

            +
            + +
            +

            X3D networks pretrained on the Kinetics 400 dataset

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Example Usage

            + +

            Imports

            + +

            Load the model:

            + +
            import torch
            +# Choose the `x3d_s` model
            +model_name = 'x3d_s'
            +model = torch.hub.load('facebookresearch/pytorchvideo', model_name, pretrained=True)
            +
            + +

            Import remaining functions:

            + +
            import json
            +import urllib
            +from pytorchvideo.data.encoded_video import EncodedVideo
            +
            +from torchvision.transforms import Compose, Lambda
            +from torchvision.transforms._transforms_video import (
            +    CenterCropVideo,
            +    NormalizeVideo,
            +)
            +from pytorchvideo.transforms import (
            +    ApplyTransformToKey,
            +    ShortSideScale,
            +    UniformTemporalSubsample
            +)
            +
            + +

            Setup

            + +

            Set the model to eval mode and move to desired device.

            + +
            # Set to GPU or CPU
            +device = "cpu"
            +model = model.eval()
            +model = model.to(device)
            +
            + +

            Download the id to label mapping for the Kinetics 400 dataset on which the torch hub models were trained. This will be used to get the category label names from the predicted class ids.

            + +
            json_url = "https://dl.fbaipublicfiles.com/pyslowfast/dataset/class_names/kinetics_classnames.json"
            +json_filename = "kinetics_classnames.json"
            +try: urllib.URLopener().retrieve(json_url, json_filename)
            +except: urllib.request.urlretrieve(json_url, json_filename)
            +
            + +
            with open(json_filename, "r") as f:
            +    kinetics_classnames = json.load(f)
            +
            +# Create an id to label name mapping
            +kinetics_id_to_classname = {}
            +for k, v in kinetics_classnames.items():
            +    kinetics_id_to_classname[v] = str(k).replace('"', "")
            +
            + +

            Define input transform

            + +
            mean = [0.45, 0.45, 0.45]
            +std = [0.225, 0.225, 0.225]
            +frames_per_second = 30
            +model_transform_params  = {
            +    "x3d_xs": {
            +        "side_size": 182,
            +        "crop_size": 182,
            +        "num_frames": 4,
            +        "sampling_rate": 12,
            +    },
            +    "x3d_s": {
            +        "side_size": 182,
            +        "crop_size": 182,
            +        "num_frames": 13,
            +        "sampling_rate": 6,
            +    },
            +    "x3d_m": {
            +        "side_size": 256,
            +        "crop_size": 256,
            +        "num_frames": 16,
            +        "sampling_rate": 5,
            +    }
            +}
            +
            +# Get transform parameters based on model
            +transform_params = model_transform_params[model_name]
            +
+# Note that these transform parameters are specific to the chosen X3D model variant.
            +transform =  ApplyTransformToKey(
            +    key="video",
            +    transform=Compose(
            +        [
            +            UniformTemporalSubsample(transform_params["num_frames"]),
            +            Lambda(lambda x: x/255.0),
            +            NormalizeVideo(mean, std),
            +            ShortSideScale(size=transform_params["side_size"]),
            +            CenterCropVideo(
            +                crop_size=(transform_params["crop_size"], transform_params["crop_size"])
            +            )
            +        ]
            +    ),
            +)
            +
            +# The duration of the input clip is also specific to the model.
            +clip_duration = (transform_params["num_frames"] * transform_params["sampling_rate"])/frames_per_second
            +
            + +
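For reference, the clip durations implied by the parameters above (using the 30 fps value set earlier) can be checked with a few lines:

# Seconds of video loaded per clip for each X3D variant.
for name, params in model_transform_params.items():
    duration = (params["num_frames"] * params["sampling_rate"]) / frames_per_second
    print(f"{name}: {duration:.2f} s")
# x3d_xs: 1.60 s, x3d_s: 2.60 s, x3d_m: 2.67 s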

            Run Inference

            + +

            Download an example video.

            + +
            url_link = "https://dl.fbaipublicfiles.com/pytorchvideo/projects/archery.mp4"
            +video_path = 'archery.mp4'
            +try: urllib.URLopener().retrieve(url_link, video_path)
            +except: urllib.request.urlretrieve(url_link, video_path)
            +
            + +

            Load the video and transform it to the input format required by the model.

            + +
            # Select the duration of the clip to load by specifying the start and end duration
            +# The start_sec should correspond to where the action occurs in the video
            +start_sec = 0
            +end_sec = start_sec + clip_duration
            +
            +# Initialize an EncodedVideo helper class and load the video
            +video = EncodedVideo.from_path(video_path)
            +
            +# Load the desired clip
            +video_data = video.get_clip(start_sec=start_sec, end_sec=end_sec)
            +
            +# Apply a transform to normalize the video input
            +video_data = transform(video_data)
            +
            +# Move the inputs to the desired device
            +inputs = video_data["video"]
            +inputs = inputs.to(device)
            +
            + +

            Get Predictions

            + +
            # Pass the input clip through the model
            +preds = model(inputs[None, ...])
            +
            +# Get the predicted classes
            +post_act = torch.nn.Softmax(dim=1)
            +preds = post_act(preds)
            +pred_classes = preds.topk(k=5).indices[0]
            +
            +# Map the predicted classes to the label names
            +pred_class_names = [kinetics_id_to_classname[int(i)] for i in pred_classes]
            +print("Top 5 predicted labels: %s" % ", ".join(pred_class_names))
            +
            + +
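If the confidence values are also of interest, the same preds tensor (already softmax-normalized above) can be queried for them. This is a small illustrative addition, not part of the original recipe.

# Print the top-5 labels together with their softmax scores.
top_scores = preds.topk(k=5).values[0]
for name, score in zip(pred_class_names, top_scores):
    print('%s: %.3f' % (name, score.item()))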

            Model Description

            +

X3D model architectures are based on [1] and are pretrained on the Kinetics dataset.

| arch | depth | frame length x sample rate | top 1 | top 5 | Flops (G) | Params (M) |
|------|-------|----------------------------|-------|-------|-----------|------------|
| X3D  | XS    | 4x12                       | 69.12 | 88.63 | 0.91      | 3.79       |
| X3D  | S     | 13x6                       | 73.33 | 91.27 | 2.96      | 3.79       |
| X3D  | M     | 16x5                       | 75.94 | 92.72 | 6.72      | 3.79       |
            + +

            References

            +

[1] Christoph Feichtenhofer, “X3D: Expanding Architectures for Efficient Video Recognition.” https://arxiv.org/abs/2004.04730

diff --git a/hub/facebookresearch_semi-supervised-ImageNet1K-models_resnext/index.html b/hub/facebookresearch_semi-supervised-ImageNet1K-models_resnext/index.html
new file mode 100644
index 000000000000..f66fec64c60f
--- /dev/null
+++ b/hub/facebookresearch_semi-supervised-ImageNet1K-models_resnext/index.html
@@ -0,0 +1,533 @@
+ Semi-supervised and semi-weakly supervised ImageNet Models | PyTorch

Semi-supervised and semi-weakly supervised ImageNet Models

            + +
            +
            +

            By Facebook AI

            +
            + +
            +

            ResNet and ResNext models introduced in the "Billion scale semi-supervised learning for image classification" paper

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +
+# === SEMI-WEAKLY SUPERVISED MODELS PRETRAINED WITH 940 MILLION HASHTAGGED PUBLIC IMAGES ===
            +model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnet18_swsl')
            +# model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnet50_swsl')
            +# model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnext50_32x4d_swsl')
            +# model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnext101_32x4d_swsl')
            +# model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnext101_32x8d_swsl')
            +# model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnext101_32x16d_swsl')
            +# ================= SEMI-SUPERVISED MODELS PRETRAINED WITH YFCC100M ==================
            +# model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnet18_ssl')
            +# model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnet50_ssl')
            +# model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnext50_32x4d_ssl')
            +# model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnext101_32x4d_ssl')
            +# model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnext101_32x8d_ssl')
            +# model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnext101_32x16d_ssl')
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
+import urllib.request
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +print(torch.nn.functional.softmax(output[0], dim=0))
            +
            +
            + +

            Model Description

            +

            This project includes the semi-supervised and semi-weakly supervised ImageNet models introduced in “Billion-scale Semi-Supervised Learning for Image Classification” https://arxiv.org/abs/1905.00546.

            + +

“Semi-supervised” (SSL) ImageNet models are pre-trained on a subset of the unlabeled YFCC100M public image dataset and fine-tuned with the ImageNet1K training dataset, as described by the semi-supervised training framework in the paper mentioned above. In this case, the high-capacity teacher model was trained only with labeled examples.

            + +

“Semi-weakly” supervised (SWSL) ImageNet models are pre-trained on 940 million public images with 1.5K hashtags matching the 1000 ImageNet1K synsets, followed by fine-tuning on the ImageNet1K dataset. In this case, the associated hashtags are only used for building a better teacher model. When training the student model, those hashtags are ignored and the student model is pretrained with a subset of 64M images selected by the teacher model from the same 940 million public image dataset.

            + +

The semi-weakly supervised ResNet and ResNext models provided in the table below significantly improve the top-1 accuracy on the ImageNet validation set compared to training from scratch or other training mechanisms introduced in the literature as of September 2019. For example, we achieve a state-of-the-art top-1 accuracy of 81.2% on ImageNet for the widely used ResNet-50 model architecture.

| Architecture | Supervision | #Parameters | FLOPS | Top-1 Acc. | Top-5 Acc. |
|--------------|-------------|-------------|-------|------------|------------|
| ResNet-18 | semi-supervised | 14M | 2B | 72.8 | 91.5 |
| ResNet-50 | semi-supervised | 25M | 4B | 79.3 | 94.9 |
| ResNeXt-50 32x4d | semi-supervised | 25M | 4B | 80.3 | 95.4 |
| ResNeXt-101 32x4d | semi-supervised | 42M | 8B | 81.0 | 95.7 |
| ResNeXt-101 32x8d | semi-supervised | 88M | 16B | 81.7 | 96.1 |
| ResNeXt-101 32x16d | semi-supervised | 193M | 36B | 81.9 | 96.2 |
| ResNet-18 | semi-weakly supervised | 14M | 2B | 73.4 | 91.9 |
| ResNet-50 | semi-weakly supervised | 25M | 4B | 81.2 | 96.0 |
| ResNeXt-50 32x4d | semi-weakly supervised | 25M | 4B | 82.2 | 96.3 |
| ResNeXt-101 32x4d | semi-weakly supervised | 42M | 8B | 83.4 | 96.8 |
| ResNeXt-101 32x8d | semi-weakly supervised | 88M | 16B | 84.3 | 97.2 |
| ResNeXt-101 32x16d | semi-weakly supervised | 193M | 36B | 84.8 | 97.4 |
            + +
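As a quick sanity check of the parameter counts listed above, one can load any of the Hub entrypoints from the first code block and count its parameters. This is an illustrative aside, not part of the original model card; it downloads the checkpoint on first use.

import torch

# Load one of the listed checkpoints and count its parameters.
model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', 'resnext101_32x16d_swsl')
num_params = sum(p.numel() for p in model.parameters())
print('%.0fM parameters' % (num_params / 1e6))  # roughly 193M, matching the table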

            Citation

            + +

            If you use the models released in this repository, please cite the following publication (https://arxiv.org/abs/1905.00546).

            +
            @misc{yalniz2019billionscale,
            +    title={Billion-scale semi-supervised learning for image classification},
            +    author={I. Zeki Yalniz and Hervé Jégou and Kan Chen and Manohar Paluri and Dhruv Mahajan},
            +    year={2019},
            +    eprint={1905.00546},
            +    archivePrefix={arXiv},
            +    primaryClass={cs.CV}
            +}
            +
diff --git a/hub/huggingface_pytorch-transformers/index.html b/hub/huggingface_pytorch-transformers/index.html
new file mode 100644
index 000000000000..dbfe962d2278
--- /dev/null
+++ b/hub/huggingface_pytorch-transformers/index.html
@@ -0,0 +1,571 @@
+ PyTorch-Transformers | PyTorch

PyTorch-Transformers

            + +
            +
            +

            By HuggingFace Team

            +
            + +
            +

            PyTorch implementations of popular NLP Transformers

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Model Description

            + +

            PyTorch-Transformers (formerly known as pytorch-pretrained-bert) is a library of state-of-the-art pre-trained models for Natural Language Processing (NLP).

            + +

            The library currently contains PyTorch implementations, pre-trained model weights, usage scripts and conversion utilities for the following models:

            + +
1. BERT (from Google) released with the paper BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
2. GPT (from OpenAI) released with the paper Improving Language Understanding by Generative Pre-Training by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
3. GPT-2 (from OpenAI) released with the paper Language Models are Unsupervised Multitask Learners by Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei and Ilya Sutskever.
4. Transformer-XL (from Google/CMU) released with the paper Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context by Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc V. Le and Ruslan Salakhutdinov.
5. XLNet (from Google/CMU) released with the paper XLNet: Generalized Autoregressive Pretraining for Language Understanding by Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov and Quoc V. Le.
6. XLM (from Facebook) released together with the paper Cross-lingual Language Model Pretraining by Guillaume Lample and Alexis Conneau.
7. RoBERTa (from Facebook), released together with the paper RoBERTa: A Robustly Optimized BERT Pretraining Approach by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer and Veselin Stoyanov.
8. DistilBERT (from HuggingFace), released together with the blog post Smaller, faster, cheaper, lighter: Introducing DistilBERT, a distilled version of BERT by Victor Sanh, Lysandre Debut and Thomas Wolf.
            + +

            The components available here are based on the AutoModel and AutoTokenizer classes of the pytorch-transformers library.

            + +

            Requirements

            + +

            Unlike most other PyTorch Hub models, BERT requires a few additional Python packages to be installed.

            + +
            pip install tqdm boto3 requests regex sentencepiece sacremoses
            +
            + +

            Usage

            + +

            The available methods are the following:

            +
• config: returns a configuration item corresponding to the specified model or path.
• tokenizer: returns a tokenizer corresponding to the specified model or path.
• model: returns a model corresponding to the specified model or path.
• modelForCausalLM: returns a model with a language modeling head corresponding to the specified model or path.
• modelForSequenceClassification: returns a model with a sequence classifier corresponding to the specified model or path.
• modelForQuestionAnswering: returns a model with a question answering head corresponding to the specified model or path.
            + +

            All these methods share the following argument: pretrained_model_or_path, which is a string identifying a pre-trained model or path from which an instance will be returned. There are several checkpoints available for each model, which are detailed below:

            + +

            The available models are listed on the transformers documentation, models page.

            + +

            Documentation

            + +

            Here are a few examples detailing the usage of each available method.

            + +

            Tokenizer

            + +

            The tokenizer object allows the conversion from character strings to tokens understood by the different models. Each model has its own tokenizer, and some tokenizing methods are different across tokenizers. The complete documentation can be found here.

            + +
            import torch
            +tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'tokenizer', 'bert-base-uncased')    # Download vocabulary from S3 and cache.
            +tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'tokenizer', './test/bert_saved_model/')  # E.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`
            +
            + +

            Models

            + +

The model object is a model instance inheriting from nn.Module. Each model comes with its own saving/loading methods, either from a local file or directory, or from a pre-trained configuration (see the previously described config). Each model works differently; a complete overview of the different models can be found in the documentation.

            + +
            import torch
            +model = torch.hub.load('huggingface/pytorch-transformers', 'model', 'bert-base-uncased')    # Download model and configuration from S3 and cache.
            +model = torch.hub.load('huggingface/pytorch-transformers', 'model', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
            +model = torch.hub.load('huggingface/pytorch-transformers', 'model', 'bert-base-uncased', output_attentions=True)  # Update configuration during loading
            +assert model.config.output_attentions == True
            +# Loading from a TF checkpoint file instead of a PyTorch model (slower)
            +config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            +model = torch.hub.load('huggingface/pytorch-transformers', 'model', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
            +
            + +

            Models with a language modeling head

            + +

            Previously mentioned model instance with an additional language modeling head.

            + +
            import torch
            +model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'gpt2')    # Download model and configuration from huggingface.co and cache.
            +model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './test/saved_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
            +model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', 'gpt2', output_attentions=True)  # Update configuration during loading
            +assert model.config.output_attentions == True
            +# Loading from a TF checkpoint file instead of a PyTorch model (slower)
            +config = AutoConfig.from_pretrained('./tf_model/gpt_tf_model_config.json')
            +model = torch.hub.load('huggingface/transformers', 'modelForCausalLM', './tf_model/gpt_tf_checkpoint.ckpt.index', from_tf=True, config=config)
            +
            + +

            Models with a sequence classification head

            + +

            Previously mentioned model instance with an additional sequence classification head.

            + +
            import torch
            +model = torch.hub.load('huggingface/pytorch-transformers', 'modelForSequenceClassification', 'bert-base-uncased')    # Download model and configuration from S3 and cache.
            +model = torch.hub.load('huggingface/pytorch-transformers', 'modelForSequenceClassification', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
+model = torch.hub.load('huggingface/pytorch-transformers', 'modelForSequenceClassification', 'bert-base-uncased', output_attentions=True)  # Update configuration during loading
+assert model.config.output_attentions == True
            +# Loading from a TF checkpoint file instead of a PyTorch model (slower)
            +config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            +model = torch.hub.load('huggingface/pytorch-transformers', 'modelForSequenceClassification', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
            +
            + +

            Models with a question answering head

            + +

            Previously mentioned model instance with an additional question answering head.

            + +
            import torch
            +model = torch.hub.load('huggingface/pytorch-transformers', 'modelForQuestionAnswering', 'bert-base-uncased')    # Download model and configuration from S3 and cache.
            +model = torch.hub.load('huggingface/pytorch-transformers', 'modelForQuestionAnswering', './test/bert_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
+model = torch.hub.load('huggingface/pytorch-transformers', 'modelForQuestionAnswering', 'bert-base-uncased', output_attentions=True)  # Update configuration during loading
+assert model.config.output_attentions == True
            +# Loading from a TF checkpoint file instead of a PyTorch model (slower)
            +config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            +model = torch.hub.load('huggingface/pytorch-transformers', 'modelForQuestionAnswering', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
            +
            + +

            Configuration

            + +

            The configuration is optional. The configuration object holds information concerning the model, such as the number of heads/layers, if the model should output attentions or hidden states, or if it should be adapted for TorchScript. Many parameters are available, some specific to each model. The complete documentation can be found here.

            + +
            import torch
            +config = torch.hub.load('huggingface/pytorch-transformers', 'config', 'bert-base-uncased')  # Download configuration from S3 and cache.
            +config = torch.hub.load('huggingface/pytorch-transformers', 'config', './test/bert_saved_model/')  # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
            +config = torch.hub.load('huggingface/pytorch-transformers', 'config', './test/bert_saved_model/my_configuration.json')
+config = torch.hub.load('huggingface/pytorch-transformers', 'config', 'bert-base-uncased', output_attentions=True, foo=False)
+assert config.output_attentions == True
+config, unused_kwargs = torch.hub.load('huggingface/pytorch-transformers', 'config', 'bert-base-uncased', output_attentions=True, foo=False, return_unused_kwargs=True)
+assert config.output_attentions == True
            +assert unused_kwargs == {'foo': False}
            +
            +# Using the configuration with a model
            +config = torch.hub.load('huggingface/pytorch-transformers', 'config', 'bert-base-uncased')
            +config.output_attentions = True
            +config.output_hidden_states = True
            +model = torch.hub.load('huggingface/pytorch-transformers', 'model', 'bert-base-uncased', config=config)
            +# Model will now output attentions and hidden states as well
            +
            +
            + +

            Example Usage

            + +

Here is an example of how to tokenize input text so that it can be fed to a BERT model, and then how to get the hidden states computed by that model or predict masked tokens with a BERT language modeling head.

            + +

            First, tokenize the input

            + +
            import torch
            +tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'tokenizer', 'bert-base-cased')
            +
            +text_1 = "Who was Jim Henson ?"
            +text_2 = "Jim Henson was a puppeteer"
            +
            +# Tokenized input with special tokens around it (for BERT: [CLS] at the beginning and [SEP] at the end)
            +indexed_tokens = tokenizer.encode(text_1, text_2, add_special_tokens=True)
            +
            + +
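To see where those special tokens end up, the ids can be mapped back to token strings with the tokenizer loaded above. This is a small illustrative aside, not part of the original example; for BERT the sequence should start with [CLS] and contain a [SEP] after each sentence.

# Inspect the tokenized sequence and its length.
print(tokenizer.convert_ids_to_tokens(indexed_tokens))
print(len(indexed_tokens))  # should match the length of the segments_ids list defined below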

Using BertModel to encode the input sentence as a sequence of last-layer hidden states

            + +
            # Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
            +segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
            +
            +# Convert inputs to PyTorch tensors
            +segments_tensors = torch.tensor([segments_ids])
            +tokens_tensor = torch.tensor([indexed_tokens])
            +
            +model = torch.hub.load('huggingface/pytorch-transformers', 'model', 'bert-base-cased')
            +
            +with torch.no_grad():
            +    encoded_layers, _ = model(tokens_tensor, token_type_ids=segments_tensors)
            +
            + +

            Using modelForMaskedLM to predict a masked token with BERT

            + +
            # Mask a token that we will try to predict back with `BertForMaskedLM`
            +masked_index = 8
            +indexed_tokens[masked_index] = tokenizer.mask_token_id
            +tokens_tensor = torch.tensor([indexed_tokens])
            +
            +masked_lm_model = torch.hub.load('huggingface/pytorch-transformers', 'modelForMaskedLM', 'bert-base-cased')
            +
            +with torch.no_grad():
            +    predictions = masked_lm_model(tokens_tensor, token_type_ids=segments_tensors)
            +
            +# Get the predicted token
            +predicted_index = torch.argmax(predictions[0][0], dim=1)[masked_index].item()
            +predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
            +assert predicted_token == 'Jim'
            +
            + +

            Using modelForQuestionAnswering to do question answering with BERT

            + +
            question_answering_model = torch.hub.load('huggingface/pytorch-transformers', 'modelForQuestionAnswering', 'bert-large-uncased-whole-word-masking-finetuned-squad')
            +question_answering_tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'tokenizer', 'bert-large-uncased-whole-word-masking-finetuned-squad')
            +
            +# The format is paragraph first and then question
            +text_1 = "Jim Henson was a puppeteer"
            +text_2 = "Who was Jim Henson ?"
            +indexed_tokens = question_answering_tokenizer.encode(text_1, text_2, add_special_tokens=True)
            +segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
            +segments_tensors = torch.tensor([segments_ids])
            +tokens_tensor = torch.tensor([indexed_tokens])
            +
            +# Predict the start and end positions logits
            +with torch.no_grad():
            +    out = question_answering_model(tokens_tensor, token_type_ids=segments_tensors)
            +
            +# get the highest prediction
            +answer = question_answering_tokenizer.decode(indexed_tokens[torch.argmax(out.start_logits):torch.argmax(out.end_logits)+1])
            +assert answer == "puppeteer"
            +
            +# Or get the total loss which is the sum of the CrossEntropy loss for the start and end token positions (set model to train mode before if used for training)
            +start_positions, end_positions = torch.tensor([12]), torch.tensor([14])
            +multiple_choice_loss = question_answering_model(tokens_tensor, token_type_ids=segments_tensors, start_positions=start_positions, end_positions=end_positions)
            +
            + +

            Using modelForSequenceClassification to do paraphrase classification with BERT

            + +
            sequence_classification_model = torch.hub.load('huggingface/pytorch-transformers', 'modelForSequenceClassification', 'bert-base-cased-finetuned-mrpc')
            +sequence_classification_tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'tokenizer', 'bert-base-cased-finetuned-mrpc')
            +
            +text_1 = "Jim Henson was a puppeteer"
            +text_2 = "Who was Jim Henson ?"
            +indexed_tokens = sequence_classification_tokenizer.encode(text_1, text_2, add_special_tokens=True)
            +segments_ids = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
            +segments_tensors = torch.tensor([segments_ids])
            +tokens_tensor = torch.tensor([indexed_tokens])
            +
            +# Predict the sequence classification logits
            +with torch.no_grad():
            +    seq_classif_logits = sequence_classification_model(tokens_tensor, token_type_ids=segments_tensors)
            +
            +predicted_labels = torch.argmax(seq_classif_logits[0]).item()
            +
            +assert predicted_labels == 0  # In MRPC dataset this means the two sentences are not paraphrasing each other
            +
            +# Or get the sequence classification loss (set model to train mode before if used for training)
            +labels = torch.tensor([1])
            +seq_classif_loss = sequence_classification_model(tokens_tensor, token_type_ids=segments_tensors, labels=labels)
            +
diff --git a/hub/hustvl_yolop/index.html b/hub/hustvl_yolop/index.html
new file mode 100644
index 000000000000..2e553756765f
--- /dev/null
+++ b/hub/hustvl_yolop/index.html
@@ -0,0 +1,1504 @@
+ YOLOP | PyTorch

YOLOP

            + +
            +
            +

            By Hust Visual Learning Team

            +
            + +
            +

            YOLOP pretrained on the BDD100K dataset

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Before You Start

            +

            To install YOLOP dependencies:

            +
pip install -qr https://raw.githubusercontent.com/hustvl/YOLOP/main/requirements.txt  # install dependencies
            +
            + +

            YOLOP: You Only Look Once for Panoptic driving Perception

            + +

            Model Description

            + +

[Figure: YOLOP Model]

            + +
• YOLOP is an efficient multi-task network that can jointly handle three crucial tasks in autonomous driving: object detection, drivable area segmentation and lane detection. It is also the first network to reach real time on embedded devices while maintaining state-of-the-art performance on the BDD100K dataset.
            + +

            Results

            + +

            Traffic Object Detection Result

| Model | Recall(%) | mAP50(%) | Speed(fps) |
|-------|-----------|----------|------------|
| Multinet | 81.3 | 60.2 | 8.6 |
| DLT-Net | 89.4 | 68.4 | 9.3 |
| Faster R-CNN | 77.2 | 55.6 | 5.3 |
| YOLOv5s | 86.8 | 77.2 | 82 |
| YOLOP(ours) | 89.2 | 76.5 | 41 |
            + +

            Drivable Area Segmentation Result

| Model | mIOU(%) | Speed(fps) |
|-------|---------|------------|
| Multinet | 71.6 | 8.6 |
| DLT-Net | 71.3 | 9.3 |
| PSPNet | 89.6 | 11.1 |
| YOLOP(ours) | 91.5 | 41 |
            + +

            Lane Detection Result

| Model | mIOU(%) | IOU(%) |
|-------|---------|--------|
| ENet | 34.12 | 14.64 |
| SCNN | 35.79 | 15.84 |
| ENet-SAD | 36.56 | 16.02 |
| YOLOP(ours) | 70.50 | 26.20 |
            + +

            Ablation Studies 1: End-to-end v.s. Step-by-step

| Training_method | Recall(%) | AP(%) | mIoU(%) | Accuracy(%) | IoU(%) |
|-----------------|-----------|-------|---------|-------------|--------|
| ES-W | 87.0 | 75.3 | 90.4 | 66.8 | 26.2 |
| ED-W | 87.3 | 76.0 | 91.6 | 71.2 | 26.1 |
| ES-D-W | 87.0 | 75.1 | 91.7 | 68.6 | 27.0 |
| ED-S-W | 87.5 | 76.1 | 91.6 | 68.0 | 26.8 |
| End-to-end | 89.2 | 76.5 | 91.5 | 70.5 | 26.2 |
            + +

            Ablation Studies 2: Multi-task v.s. Single task

| Training_method | Recall(%) | AP(%) | mIoU(%) | Accuracy(%) | IoU(%) | Speed(ms/frame) |
|-----------------|-----------|-------|---------|-------------|--------|-----------------|
| Det(only) | 88.2 | 76.9 | - | - | - | 15.7 |
| Da-Seg(only) | - | - | 92.0 | - | - | 14.8 |
| Ll-Seg(only) | - | - | - | 79.6 | 27.9 | 14.8 |
| Multitask | 89.2 | 76.5 | 91.5 | 70.5 | 26.2 | 24.4 |
            + +

            Notes:

            + +
• In the ablation tables above, E, D, S and W refer to the Encoder, the Detect head, the two Segment heads and the Whole network. So the algorithm (first train only the Encoder and Detect head; then freeze the Encoder and Detect head and train the two Segmentation heads; finally train the entire network jointly on all three tasks) can be marked as ED-S-W, and the same for the others.
            + +

            Visualization

            + +

            Traffic Object Detection Result

            + +

[Figure: Traffic Object Detection Result]

            + +

            Drivable Area Segmentation Result

            + +

[Figure: Drivable Area Segmentation Result]

            + +

            Lane Detection Result

            + +

[Figure: Lane Detection Result]

            + +

            Notes:

            + +
• The visualization of the lane detection result has been post-processed by quadratic fitting.
            + +

            Deployment

            + +

Our model can run inference in real time on a Jetson TX2, with a ZED camera used to capture images; we use TensorRT to speed it up. Code for deployment and inference is provided in the GitHub repository, and a rough ONNX-export sketch is shown after the Hub loading example below.

            + +

            Load From PyTorch Hub

            +

            This example loads the pretrained YOLOP model and passes an image for inference.

            +
            import torch
            +
            +# load model
            +model = torch.hub.load('hustvl/yolop', 'yolop', pretrained=True)
            +
+# inference
+img = torch.randn(1, 3, 640, 640)
+det_out, da_seg_out, ll_seg_out = model(img)
            +
            + +
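Relating to the deployment note above: one common route to TensorRT is to first export the network to ONNX. The sketch below is only an illustration under assumed settings (640x640 input, opset 11, output names chosen here) and is not the repository's official deployment path, which should be treated as the reference.

import torch

# Hypothetical ONNX export of the Hub model for consumption by TensorRT or onnxruntime.
model = torch.hub.load('hustvl/yolop', 'yolop', pretrained=True)
model.eval()
dummy = torch.randn(1, 3, 640, 640)  # assumed input resolution, as in the example above
torch.onnx.export(
    model, dummy, 'yolop.onnx',
    opset_version=11,
    input_names=['image'],
    output_names=['det_out', 'da_seg_out', 'll_seg_out'],
)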

            Citation

            + +

See the GitHub repository and the arXiv paper for more details.

            + +

            If you find our paper and code useful for your research, please consider giving a star and citation:

            + + + +
diff --git a/hub/intelisl_midas_v2/index.html b/hub/intelisl_midas_v2/index.html
new file mode 100644
index 000000000000..595c59a2838b
--- /dev/null
+++ b/hub/intelisl_midas_v2/index.html
@@ -0,0 +1,430 @@
+ MiDaS | PyTorch

MiDaS

            + +
            +
            +

            By Intel ISL

            +
            + +
            +

            MiDaS models for computing relative depth from a single image.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Model Description

            + +

MiDaS computes relative inverse depth from a single image. The repository provides multiple models that cover different use cases, ranging from a small, high-speed model to a very large model that provides the highest accuracy. The models have been trained on 10 distinct datasets using multi-objective optimization to ensure high quality on a wide range of inputs.

            + +

            Dependencies

            + +

            MiDaS depends on timm. Install with

            +
            pip install timm
            +
            + +

            Example Usage

            + +

            Download an image from the PyTorch homepage

            +
            import cv2
            +import torch
            +import urllib.request
            +
            +import matplotlib.pyplot as plt
            +
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +urllib.request.urlretrieve(url, filename)
            +
            + +

            Load a model (see https://github.com/intel-isl/MiDaS/#Accuracy for an overview)

            +
            model_type = "DPT_Large"     # MiDaS v3 - Large     (highest accuracy, slowest inference speed)
            +#model_type = "DPT_Hybrid"   # MiDaS v3 - Hybrid    (medium accuracy, medium inference speed)
            +#model_type = "MiDaS_small"  # MiDaS v2.1 - Small   (lowest accuracy, highest inference speed)
            +
            +midas = torch.hub.load("intel-isl/MiDaS", model_type)
            +
            +

            Move model to GPU if available

            +
            device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
            +midas.to(device)
            +midas.eval()
            +
            + +

            Load transforms to resize and normalize the image for large or small model

            +
            midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
            +
            +if model_type == "DPT_Large" or model_type == "DPT_Hybrid":
            +    transform = midas_transforms.dpt_transform
            +else:
            +    transform = midas_transforms.small_transform
            +
            + +

            Load image and apply transforms

            +
            img = cv2.imread(filename)
            +img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            +
            +input_batch = transform(img).to(device)
            +
            + +

            Predict and resize to original resolution

            +
            with torch.no_grad():
            +    prediction = midas(input_batch)
            +
            +    prediction = torch.nn.functional.interpolate(
            +        prediction.unsqueeze(1),
            +        size=img.shape[:2],
            +        mode="bicubic",
            +        align_corners=False,
            +    ).squeeze()
            +
            +output = prediction.cpu().numpy()
            +
            + +

            Show result

            +
            plt.imshow(output)
            +# plt.show()
            +
            + +
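Because MiDaS predicts relative inverse depth, the raw values are only meaningful up to an unknown scale and shift; to write a viewable image, one option is to min-max normalize the prediction first. This is a small sketch building on the output array from the previous step (the filename is arbitrary).

import numpy as np
import cv2

# Min-max normalize the relative inverse depth to 0-255 for visualization.
depth_min, depth_max = output.min(), output.max()
depth_vis = (255 * (output - depth_min) / (depth_max - depth_min)).astype(np.uint8)
cv2.imwrite('depth_dog.png', depth_vis)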

            References

            +

            Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer

            + +

            Vision Transformers for Dense Prediction

            + +

            Please cite our papers if you use our models:

            +
            @article{Ranftl2020,
            +	author    = {Ren\'{e} Ranftl and Katrin Lasinger and David Hafner and Konrad Schindler and Vladlen Koltun},
            +	title     = {Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer},
            +	journal   = {IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)},
            +	year      = {2020},
            +}
            +
            +
            @article{Ranftl2021,
            +	author    = {Ren\'{e} Ranftl and Alexey Bochkovskiy and Vladlen Koltun},
            +	title     = {Vision Transformers for Dense Prediction},
            +	journal   = {ArXiv preprint},
            +	year      = {2021},
            +}
            +
diff --git a/hub/mateuszbuda_brain-segmentation-pytorch_unet/index.html b/hub/mateuszbuda_brain-segmentation-pytorch_unet/index.html
new file mode 100644
index 000000000000..f6afb6fa7e7e
--- /dev/null
+++ b/hub/mateuszbuda_brain-segmentation-pytorch_unet/index.html
@@ -0,0 +1,407 @@
+ U-Net for brain MRI | PyTorch

U-Net for brain MRI

            + +
            +
            +

            By mateuszbuda

            +
            + +
            +

            U-Net with batch normalization for biomedical image segmentation with pretrained weights for abnormality segmentation in brain MRI

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('mateuszbuda/brain-segmentation-pytorch', 'unet',
            +    in_channels=3, out_channels=1, init_features=32, pretrained=True)
            +
            +
            + +

Loads a U-Net model pre-trained for abnormality segmentation on a dataset of brain MRI volumes (kaggle.com/mateuszbuda/lgg-mri-segmentation). The pre-trained model requires 3 input channels, 1 output channel, and 32 features in the first layer.

            + +

            Model Description

            + +

This U-Net model comprises four levels of blocks, each containing two convolutional layers with batch normalization and a ReLU activation function; the encoding part additionally uses one max-pooling layer per block, while the decoding part uses up-convolutional layers instead. The number of convolutional filters in each block is 32, 64, 128, and 256, and the bottleneck layer has 512 convolutional filters. Skip connections link the encoding layers to the corresponding layers in the decoding part. The input is a 3-channel brain MRI slice from pre-contrast, FLAIR, and post-contrast sequences, respectively, and the output is a one-channel probability map of abnormality regions with the same size as the input image. It can be converted to a binary segmentation mask by thresholding, as shown in the example below.

            + +
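A quick shape check is an easy way to confirm the 3-channel input / 1-channel probability-map behaviour described above. This is an illustrative aside using the model loaded in the first code block and a random 256x256 tensor, not part of the original example.

import torch

x = torch.rand(1, 3, 256, 256)         # random stand-in for a 256x256 MRI slice
with torch.no_grad():
    y = model(x)
print(y.shape)                         # expected: torch.Size([1, 1, 256, 256])
print(float(y.min()), float(y.max()))  # probabilities, so values lie in [0, 1]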

            Example

            + +

Input images for the pre-trained model should have 3 channels, be resized to 256x256 pixels, and be z-score normalized per volume.

            + +
            # Download an example image
+import urllib.request
            +url, filename = ("https://github.com/mateuszbuda/brain-segmentation-pytorch/raw/master/assets/TCGA_CS_4944.png", "TCGA_CS_4944.png")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            import numpy as np
            +from PIL import Image
            +from torchvision import transforms
            +
            +input_image = Image.open(filename)
            +m, s = np.mean(input_image, axis=(0, 1)), np.std(input_image, axis=(0, 1))
            +preprocess = transforms.Compose([
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=m, std=s),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0)
            +
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model = model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +
            +print(torch.round(output[0]))
            +
            + +
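torch.round above implicitly thresholds the probability map at 0.5; writing the threshold explicitly makes it visible and tunable. This is a small variant of the final line, with 0.5 as the assumed cut-off.

# Explicit thresholding of the probability map into a binary mask.
mask = (output[0] > 0.5).float()
print(int(mask.sum().item()), 'pixels flagged as abnormal')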

            References

            + + + + +
diff --git a/hub/nicolalandro_ntsnet-cub200_ntsnet/index.html b/hub/nicolalandro_ntsnet-cub200_ntsnet/index.html
new file mode 100644
index 000000000000..42a45b69ad50
--- /dev/null
+++ b/hub/nicolalandro_ntsnet-cub200_ntsnet/index.html
@@ -0,0 +1,397 @@
+ ntsnet | PyTorch

ntsnet

            + +
            +
            +

            By Moreno Caraffini and Nicola Landro

            +
            + +
            +

Classify birds using this fine-grained image classifier.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('nicolalandro/ntsnet-cub200', 'ntsnet', pretrained=True,
            +                       **{'topN': 6, 'device':'cpu', 'num_classes': 200})
            +
            + +

            Example Usage

            + +
            from torchvision import transforms
            +import torch
+import urllib.request
            +from PIL import Image
            +
            +transform_test = transforms.Compose([
            +    transforms.Resize((600, 600), Image.BILINEAR),
            +    transforms.CenterCrop((448, 448)),
            +    # transforms.RandomHorizontalFlip(),  # only if train
            +    transforms.ToTensor(),
            +    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
            +])
            +
            +
            +model = torch.hub.load('nicolalandro/ntsnet-cub200', 'ntsnet', pretrained=True, **{'topN': 6, 'device':'cpu', 'num_classes': 200})
            +model.eval()
            +
            +url = 'https://raw.githubusercontent.com/nicolalandro/ntsnet-cub200/master/images/nts-net.png'
            +img = Image.open(urllib.request.urlopen(url))
            +scaled_img = transform_test(img)
            +torch_images = scaled_img.unsqueeze(0)
            +
            +with torch.no_grad():
            +    top_n_coordinates, concat_out, raw_logits, concat_logits, part_logits, top_n_index, top_n_prob = model(torch_images)
            +
            +    _, predict = torch.max(concat_logits, 1)
            +    pred_id = predict.item()
            +    print('bird class:', model.bird_classes[pred_id])
            +
            + +
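Beyond the single argmax above, the same concat_logits tensor can be inspected for the top few classes. This is an illustrative addition, not part of the original example; the values are raw logits, not probabilities.

# Top-3 class scores from the concatenated logits.
topk_scores, topk_ids = torch.topk(concat_logits, k=3, dim=1)
for score, idx in zip(topk_scores[0], topk_ids[0]):
    print('%s: %.3f' % (model.bird_classes[idx.item()], score.item()))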

            Model Description

            +

This is an NTS-Net pretrained on the CUB-200-2011 dataset, a fine-grained dataset of bird species.

            + +

            References

            +

            You can read the full paper at this link.

            +
            @INPROCEEDINGS{Gallo:2019:IVCNZ,
            +  author={Nawaz, Shah and Calefati, Alessandro and Caraffini, Moreno and Landro, Nicola and Gallo, Ignazio},
            +  booktitle={2019 International Conference on Image and Vision Computing New Zealand (IVCNZ 2019)},
            +  title={Are These Birds Similar: Learning Branched Networks for Fine-grained Representations},
            +  year={2019},
            +  month={Dec},
            +}
            +
diff --git a/hub/nvidia_deeplearningexamples_efficientnet/index.html b/hub/nvidia_deeplearningexamples_efficientnet/index.html
new file mode 100644
index 000000000000..9e499ee3275e
--- /dev/null
+++ b/hub/nvidia_deeplearningexamples_efficientnet/index.html
@@ -0,0 +1,460 @@
+ EfficientNet | PyTorch

EfficientNet

            + +
            +
            +

            By NVIDIA

            +
            + +
            +

EfficientNets are a family of image classification models that achieve state-of-the-art accuracy while being an order of magnitude smaller and faster than previous models. They are trained with mixed precision using Tensor Cores.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Model Description

            + +

EfficientNet is an image classification model family. It was first described in EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks. This notebook allows you to load and test the EfficientNet-B0, EfficientNet-B4, EfficientNet-WideSE-B0 and EfficientNet-WideSE-B4 models.

            + +

EfficientNet-WideSE models use Squeeze-and-Excitation layers that are wider than in the original EfficientNet models: the width of the SE module is proportional to the width of the Depthwise Separable Convolutions instead of to the block width.

            + +

            WideSE models are slightly more accurate than original models.

            + +

            This model is trained with mixed precision using Tensor Cores on Volta and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results over 2x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.

            + +

            We use NHWC data layout when training using Mixed Precision.

            + +

            Example

            + +

In the example below we will use the pretrained EfficientNet model to perform inference on an image and present the result.

            + +

            To run the example you need some extra python packages installed. These are needed for preprocessing images and visualization.

            +
            !pip install validators matplotlib
            +
            + +
            import torch
            +from PIL import Image
            +import torchvision.transforms as transforms
            +import numpy as np
            +import json
            +import requests
            +import matplotlib.pyplot as plt
            +import warnings
            +warnings.filterwarnings('ignore')
            +%matplotlib inline
            +
            +device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
            +print(f'Using {device} for inference')
            +
            + +

            Load the model pretrained on ImageNet dataset.

            + +

            You can choose among the following models:

| TorchHub entrypoint | Description |
|---------------------|-------------|
| nvidia_efficientnet_b0 | baseline EfficientNet |
| nvidia_efficientnet_b4 | scaled EfficientNet |
| nvidia_efficientnet_widese_b0 | model with Squeeze-and-Excitation layers wider than baseline EfficientNet model |
| nvidia_efficientnet_widese_b4 | model with Squeeze-and-Excitation layers wider than scaled EfficientNet model |
            + +

There are also quantized versions of the models, but they require the NVIDIA container. See the quantized models.
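Any entrypoint from the table above can be substituted into the torch.hub.load call below. For instance, a sketch loading one of the WideSE variants instead of the baseline model:
efficientnet_widese_b4 = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_efficientnet_widese_b4', pretrained=True)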

            +
            efficientnet = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_efficientnet_b0', pretrained=True)
            +utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_convnets_processing_utils')
            +
            +efficientnet.eval().to(device)
            +
            +
            + +

            Prepare sample input data.

            +
            uris = [
            +    'http://images.cocodataset.org/test-stuff2017/000000024309.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000028117.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000006149.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000004954.jpg',
            +]
            +
            +batch = torch.cat(
            +    [utils.prepare_input_from_uri(uri) for uri in uris]
            +).to(device)
            +
            + +

            Run inference. Use pick_n_best(predictions=output, n=topN) helper function to pick N most probable hypotheses according to the model.

            +
            with torch.no_grad():
            +    output = torch.nn.functional.softmax(efficientnet(batch), dim=1)
            +    
            +results = utils.pick_n_best(predictions=output, n=5)
            +
            + +

            Display the result.

            +
            for uri, result in zip(uris, results):
            +    img = Image.open(requests.get(uri, stream=True).raw)
+    img.thumbnail((256,256), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
            +    plt.imshow(img)
            +    plt.show()
            +    print(result)
            +
            + +

            Details

            +

For detailed information on model input and output, training recipes, inference and performance visit: +github +and/or NGC

            + +

            References

            + + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/nvidia_deeplearningexamples_fastpitch/index.html b/hub/nvidia_deeplearningexamples_fastpitch/index.html new file mode 100644 index 000000000000..23323353ad98 --- /dev/null +++ b/hub/nvidia_deeplearningexamples_fastpitch/index.html @@ -0,0 +1,466 @@ + + + + + + + + + + + + + FastPitch 2 | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + FastPitch 2 +

            + +
            +
            +

            By NVIDIA

            +
            + +
            +

            The FastPitch model for generating mel spectrograms from text

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Model Description

            + +

This notebook demonstrates a PyTorch implementation of the FastPitch model described in the FastPitch paper. +The FastPitch model generates mel-spectrograms and predicts a pitch contour from raw input text. In version 1.1, it does not need any pre-trained aligning model to bootstrap from. To get the audio waveform, we need a second model that produces it from the generated mel-spectrogram; in this notebook we use the HiFi-GAN model for that step.

            + +

The FastPitch model is based on the FastSpeech model. The main differences between FastPitch and FastSpeech are as follows:

            +
• no dependence on external aligner (Transformer TTS, Tacotron 2); in version 1.1, FastPitch aligns audio to transcriptions by itself as in One TTS Alignment To Rule Them All,
• FastPitch explicitly learns to predict the pitch contour,
• pitch conditioning removes harsh sounding artifacts and provides faster convergence,
• no need for distilling mel-spectrograms with a teacher model,
• capabilities to train a multi-speaker model.
            + +

            Model architecture

            + +

            FastPitch Architecture

            + +

            Example

            +

            In the example below:

            + +
• pretrained FastPitch and HiFiGAN models are loaded from torch.hub
• given tensor representation of an input text (“Say this smoothly to prove you are not a robot.”), FastPitch generates mel spectrogram
• HiFiGAN generates sound given the mel spectrogram
• the output sound is saved in an ‘audio.wav’ file
            + +

To run the example you need some extra Python packages installed. These are needed for preprocessing of text and audio, as well as for display and input/output handling. Finally, for better performance of the FastPitch model, we download the CMU pronunciation dictionary.

            +
            apt-get update
            +apt-get install -y libsndfile1 wget
            +pip install numpy scipy librosa unidecode inflect librosa matplotlib==3.6.3
            +wget https://raw.githubusercontent.com/NVIDIA/NeMo/263a30be71e859cee330e5925332009da3e5efbc/scripts/tts_dataset_files/heteronyms-052722 -qO heteronyms
            +wget https://raw.githubusercontent.com/NVIDIA/NeMo/263a30be71e859cee330e5925332009da3e5efbc/scripts/tts_dataset_files/cmudict-0.7b_nv22.08 -qO cmudict-0.7b
            +
            + +
            import torch
            +import matplotlib.pyplot as plt
            +from IPython.display import Audio
            +import warnings
            +warnings.filterwarnings('ignore')
            +
            +device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
            +print(f'Using {device} for inference')
            +
            + +

            Download and setup FastPitch generator model.

            +
            fastpitch, generator_train_setup = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_fastpitch')
            +
            + +

            Download and setup vocoder and denoiser models.

            +
            hifigan, vocoder_train_setup, denoiser = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_hifigan')
            +
            + +

            Verify that generator and vocoder models agree on input parameters.

            +
            CHECKPOINT_SPECIFIC_ARGS = [
            +    'sampling_rate', 'hop_length', 'win_length', 'p_arpabet', 'text_cleaners',
            +    'symbol_set', 'max_wav_value', 'prepend_space_to_text',
            +    'append_space_to_text']
            +
            +for k in CHECKPOINT_SPECIFIC_ARGS:
            +
            +    v1 = generator_train_setup.get(k, None)
            +    v2 = vocoder_train_setup.get(k, None)
            +
            +    assert v1 is None or v2 is None or v1 == v2, \
            +        f'{k} mismatch in spectrogram generator and vocoder'
            +
            + +

            Put all models on available device.

            +
            fastpitch.to(device)
            +hifigan.to(device)
            +denoiser.to(device)
            +
            + +

            Load text processor.

            +
            tp = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_textprocessing_utils', cmudict_path="cmudict-0.7b", heteronyms_path="heteronyms")
            +
            + +

Set the text to be synthesized, prepare the input and set additional generation parameters.

            +
            text = "Say this smoothly, to prove you are not a robot."
            +
            + +
            batches = tp.prepare_input_sequence([text], batch_size=1)
            +
            + +
            gen_kw = {'pace': 1.0,
            +          'speaker': 0,
            +          'pitch_tgt': None,
            +          'pitch_transform': None}
            +denoising_strength = 0.005
            +
            + +
            for batch in batches:
            +    with torch.no_grad():
            +        mel, mel_lens, *_ = fastpitch(batch['text'].to(device), **gen_kw)
            +        audios = hifigan(mel).float()
            +        audios = denoiser(audios.squeeze(1), denoising_strength)
            +        audios = audios.squeeze(1) * vocoder_train_setup['max_wav_value']
            +
            +
            + +

Plot the intermediate spectrogram.

            +
            plt.figure(figsize=(10,12))
            +res_mel = mel[0].detach().cpu().numpy()
            +plt.imshow(res_mel, origin='lower')
            +plt.xlabel('time')
            +plt.ylabel('frequency')
            +_=plt.title('Spectrogram')
            +
            + +

Synthesize audio.

            +
            audio_numpy = audios[0].cpu().numpy()
            +Audio(audio_numpy, rate=22050)
            +
            + +
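The playback rate above is hard-coded to 22050 Hz. A slightly safer variant (a small sketch, not part of the original recipe) takes the rate from the vocoder setup, just as the wav-writing step below does:
Audio(audio_numpy, rate=vocoder_train_setup['sampling_rate'])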

            Write audio to wav file.

            +
            from scipy.io.wavfile import write
            +write("audio.wav", vocoder_train_setup['sampling_rate'], audio_numpy)
            +
            + +

            Details

            +

For detailed information on model input and output, training recipes, inference and performance visit: github and/or NGC

            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/nvidia_deeplearningexamples_gpunet/index.html b/hub/nvidia_deeplearningexamples_gpunet/index.html new file mode 100644 index 000000000000..9738486106fa --- /dev/null +++ b/hub/nvidia_deeplearningexamples_gpunet/index.html @@ -0,0 +1,482 @@ + + + + + + + + + + + + + GPUNet | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + GPUNet +

            + +
            +
            +

            By NVIDIA

            +
            + +
            +

GPUNet is a new family of Convolutional Neural Networks designed to max out the performance of NVIDIA GPUs and TensorRT.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Model Description

            +

GPUNets are a new family of deployment- and production-ready Convolutional Neural Networks from NVIDIA, auto-designed to max out the performance of NVIDIA GPUs and TensorRT.

            + +

Crafted by NVIDIA AI using novel Neural Architecture Search (NAS) methods, GPUNet demonstrates state-of-the-art inference performance, up to 2x faster than EfficientNet-X and FBNet-V3. This notebook allows you to load and test all of the GPUNet model implementations listed in our CVPR-2022 paper. You can use it to quickly load each of the listed models and perform inference runs.

            + +

            Example

            +

In the example below the pretrained GPUNet-0 model is loaded by default to perform inference on images and present the results. You can switch the default pre-trained model from GPUNet-0 to one of these: GPUNet-1, GPUNet-2, GPUNet-P0, GPUNet-P1, GPUNet-D1 or GPUNet-D2.

            +

            Install pre-requisites

            +

            To run the example you need some extra python packages installed. These are needed for preprocessing images and visualization.

            +
            !pip install validators matplotlib
            +!pip install timm==0.5.4
            +
            + +
            import torch
            +from PIL import Image
            +import torchvision.transforms as transforms
            +import numpy as np
            +import json
            +import requests
            +import matplotlib.pyplot as plt
            +import warnings
            +
            +warnings.filterwarnings('ignore')
            +%matplotlib inline
            +
            +
            +if torch.cuda.is_available():
            +    device = torch.device("cuda") 
            +    !nvidia-smi
            +else:
            +    device = torch.device("cpu")
            +
            +print(f'Using {device} for inference')
            +
            + +

            Load Pretrained model

            +

Loads the NVIDIA GPUNet-0 model by default, pre-trained on the ImageNet dataset. You can switch the default pre-trained model from GPUNet-0 to one of the models listed below.

            + +

The model architecture is visible as output of the loaded model. For detailed architecture and latency information please refer to the architecture section in the original repo and Table 3 in the CVPR-2022 paper, respectively.

            + +

            Please pick and choose one of the following pre-trained models:

            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            TorchHub modelDescription
            GPUNet-0GPUNet-0 has the fastest measured latency on GV100
            GPUNet-1GPUNet-1 has improved accuracy with one additional layer on GPUNet-0
            GPUNet-2GPUNet-2 has higher accuracy with two additional layers on GPUNet-0
            GPUNet-P0GPUNet-P0 is the distilled model with higher accuracy than GPUNet-0 but similar latency
            GPUNet-P1GPUNet-P1 is distilled model with even higher accuracy than GPUNet-1 but similar latency
            GPUNet-D1GPUNet-D1 has the second highest accuracy amongst all GPUNets
            GPUNet-D2GPUNet-D2 has the highest accuracy amongst all GPUNets
            + +
            model_type = "GPUNet-0" # select one from above
            +precision = "fp32" # select either fp32 of fp16 (for better performance on GPU)
            +
            +gpunet = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_gpunet', pretrained=True, model_type=model_type, model_math=precision)
            +utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_convnets_processing_utils')
            +
            +gpunet.to(device)
            +gpunet.eval()
            +
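As noted above, the architecture is shown when the loaded module is displayed. If your environment does not display it automatically, printing the module and counting its parameters (plain PyTorch behaviour, nothing GPUNet-specific) gives the same overview:
print(gpunet)
+print(sum(p.numel() for p in gpunet.parameters()), "parameters")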
            + +

            Prepare inference data

            +

            Prepare sample input data for inference.

            +
            uris = [
            +    'http://images.cocodataset.org/test-stuff2017/000000024309.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000028117.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000006149.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000004954.jpg',
            +]
            +
            +batch = torch.cat(
            +    [utils.prepare_input_from_uri(uri) for uri in uris]
            +).to(device)
            +
            +if precision == "fp16":
            +    batch = batch.half()
            +    
            +print("Ready to run inference...")
            +
            + +

            Run inference

            +

            Use pick_n_best(predictions=output, n=topN) helper function to pick N most probable hypotheses according to the model.

            + +
            with torch.no_grad():
            +    output = torch.nn.functional.softmax(gpunet(batch), dim=1)
            +    
            +results = utils.pick_n_best(predictions=output, n=5)
            +
            + +

            Display result

            +
            for uri, result in zip(uris, results):
            +    img = Image.open(requests.get(uri, stream=True).raw)
+    img.thumbnail((256,256), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
            +    plt.imshow(img)
            +    plt.show()
            +    print(result)
            +
            + +

            Details

            +

For detailed information on model input and output, training recipes, inference and performance visit: +github

            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/nvidia_deeplearningexamples_hifigan/index.html b/hub/nvidia_deeplearningexamples_hifigan/index.html new file mode 100644 index 000000000000..6e82cdd6f756 --- /dev/null +++ b/hub/nvidia_deeplearningexamples_hifigan/index.html @@ -0,0 +1,465 @@ + + + + + + + + + + + + + HiFi GAN | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + HiFi GAN +

            + +
            +
            +

            By NVIDIA

            +
            + +
            +

            The HiFi GAN model for generating waveforms from mel spectrograms

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Model Description

            +

This notebook demonstrates a PyTorch implementation of the HiFi-GAN model described in the paper: HiFi-GAN: Generative Adversarial Networks for Efficient and High Fidelity Speech Synthesis. +The HiFi-GAN model implements a spectrogram inversion model that allows you to synthesize speech waveforms from mel-spectrograms. It follows the generative adversarial network (GAN) paradigm and is composed of a generator and a discriminator. After training, the generator is used for synthesis and the discriminator is discarded.
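A minimal sketch of spectrogram inversion on its own is shown below; the 80 mel bins and the random input are assumptions for illustration only, and real mel-spectrograms come from a generator such as FastPitch, as in the full example later on this page.
import torch
+hifigan, vocoder_train_setup, denoiser = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_hifigan')
+hifigan.eval()
+mel = torch.randn(1, 80, 200)        # (batch, n_mel_channels, frames) -- random placeholder, not real speech
+with torch.no_grad():
+    audio = hifigan(mel).float()     # waveform tensor with a singleton channel dimension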

            + +

            Our implementation is based on the one published by the authors of the paper. We modify the original hyperparameters and provide an alternative training recipe, which enables training on larger batches and faster convergence. HiFi-GAN is trained on a publicly available LJ Speech dataset. The samples demonstrate speech synthesized with our publicly available FastPitch and HiFi-GAN checkpoints.

            + +

            Model architecture

            + +

            HiFiGAN Architecture

            + +

            Example

            +

            In the example below:

            + +
• pretrained FastPitch and HiFiGAN models are loaded from torch.hub
• given tensor representation of an input text (“Say this smoothly to prove you are not a robot.”), FastPitch generates mel spectrogram
• HiFiGAN generates sound given the mel spectrogram
• the output sound is saved in an ‘audio.wav’ file
            + +

To run the example you need some extra Python packages installed. These are needed for preprocessing of text and audio, as well as for display and input/output handling. Finally, for better performance of the FastPitch model, we download the CMU pronunciation dictionary.

            +
            pip install numpy scipy librosa unidecode inflect librosa matplotlib==3.6.3
            +apt-get update
            +apt-get install -y libsndfile1 wget
            +wget https://raw.githubusercontent.com/NVIDIA/NeMo/263a30be71e859cee330e5925332009da3e5efbc/scripts/tts_dataset_files/heteronyms-052722 -qO heteronyms
            +wget https://raw.githubusercontent.com/NVIDIA/NeMo/263a30be71e859cee330e5925332009da3e5efbc/scripts/tts_dataset_files/cmudict-0.7b_nv22.08 -qO cmudict-0.7b
            +
            + +
            import torch
            +import matplotlib.pyplot as plt
            +from IPython.display import Audio
            +import warnings
            +warnings.filterwarnings('ignore')
            +
            +device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
            +print(f'Using {device} for inference')
            +
            + +

            Download and setup FastPitch generator model.

            +
            fastpitch, generator_train_setup = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_fastpitch')
            +
            + +

            Download and setup vocoder and denoiser models.

            +
            hifigan, vocoder_train_setup, denoiser = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_hifigan')
            +
            + +

            Verify that generator and vocoder models agree on input parameters.

            +
            CHECKPOINT_SPECIFIC_ARGS = [
            +    'sampling_rate', 'hop_length', 'win_length', 'p_arpabet', 'text_cleaners',
            +    'symbol_set', 'max_wav_value', 'prepend_space_to_text',
            +    'append_space_to_text']
            +
            +for k in CHECKPOINT_SPECIFIC_ARGS:
            +
            +    v1 = generator_train_setup.get(k, None)
            +    v2 = vocoder_train_setup.get(k, None)
            +
            +    assert v1 is None or v2 is None or v1 == v2, \
            +        f'{k} mismatch in spectrogram generator and vocoder'
            +
            + +

            Put all models on available device.

            +
            fastpitch.to(device)
            +hifigan.to(device)
            +denoiser.to(device)
            +
            + +

            Load text processor.

            +
            tp = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_textprocessing_utils', cmudict_path="cmudict-0.7b", heteronyms_path="heteronyms")
            +
            + +

Set the text to be synthesized, prepare the input and set additional generation parameters.

            +
            text = "Say this smoothly, to prove you are not a robot."
            +
            + +
            batches = tp.prepare_input_sequence([text], batch_size=1)
            +
            + +
            gen_kw = {'pace': 1.0,
            +          'speaker': 0,
            +          'pitch_tgt': None,
            +          'pitch_transform': None}
            +denoising_strength = 0.005
            +
            + +
            for batch in batches:
            +    with torch.no_grad():
            +        mel, mel_lens, *_ = fastpitch(batch['text'].to(device), **gen_kw)
            +        audios = hifigan(mel).float()
            +        audios = denoiser(audios.squeeze(1), denoising_strength)
            +        audios = audios.squeeze(1) * vocoder_train_setup['max_wav_value']
            +
            +
            + +

Plot the intermediate spectrogram.

            +
            plt.figure(figsize=(10,12))
            +res_mel = mel[0].detach().cpu().numpy()
            +plt.imshow(res_mel, origin='lower')
            +plt.xlabel('time')
            +plt.ylabel('frequency')
            +_=plt.title('Spectrogram')
            +
            + +

Synthesize audio.

            +
            audio_numpy = audios[0].cpu().numpy()
            +Audio(audio_numpy, rate=22050)
            +
            + +

            Write audio to wav file.

            +
            from scipy.io.wavfile import write
            +write("audio.wav", vocoder_train_setup['sampling_rate'], audio_numpy)
            +
            + +

            Details

            +

For detailed information on model input and output, training recipes, inference and performance visit: github and/or NGC

            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/nvidia_deeplearningexamples_resnet50/index.html b/hub/nvidia_deeplearningexamples_resnet50/index.html new file mode 100644 index 000000000000..68c5b18cfb80 --- /dev/null +++ b/hub/nvidia_deeplearningexamples_resnet50/index.html @@ -0,0 +1,433 @@ + + + + + + + + + + + + + ResNet50 | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + ResNet50 +

            + +
            +
            +

            By NVIDIA

            +
            + +
            +

            ResNet50 model trained with mixed precision using Tensor Cores.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Model Description

            + +

            The ResNet50 v1.5 model is a modified version of the original ResNet50 v1 model.

            + +

The difference between v1 and v1.5 is that, in the bottleneck blocks which require +downsampling, v1 has stride = 2 in the first 1x1 convolution, whereas v1.5 has stride = 2 in the 3x3 convolution.

            + +

            This difference makes ResNet50 v1.5 slightly more accurate (~0.5% top1) than v1, but comes with a small performance drawback (~5% imgs/sec).

            + +

            The model is initialized as described in Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification
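That paper introduces what PyTorch exposes as Kaiming (He) initialization. A small sketch of applying it to a freshly created convolution follows; it uses generic torch.nn.init calls and is not the exact initialization code of this repository.
import torch.nn as nn
+conv = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)   # ResNet-style stem convolution
+nn.init.kaiming_normal_(conv.weight, mode='fan_out', nonlinearity='relu') # He-normal initialization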

            + +

            This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results over 2x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.

            + +

            Note that the ResNet50 v1.5 model can be deployed for inference on the NVIDIA Triton Inference Server using TorchScript, ONNX Runtime or TensorRT as an execution backend. For details check NGC
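As a first step toward such a deployment, the model can be exported from PyTorch. Below is a hedged sketch of tracing it to TorchScript; the example input size and file name are placeholders, and the actual Triton packaging steps are described on NGC.
import torch
+resnet50 = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_resnet50', pretrained=True).eval()
+example = torch.randn(1, 3, 224, 224)          # placeholder input batch
+traced = torch.jit.trace(resnet50, example)    # TorchScript module via tracing
+traced.save("model.pt")                        # file that a Triton model repository would reference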

            + +

            Example

            + +

In the example below we will use the pretrained ResNet50 v1.5 model to perform inference on images and present the results.

            + +

            To run the example you need some extra python packages installed. These are needed for preprocessing images and visualization.

            + +
            !pip install validators matplotlib
            +
            + +
            import torch
            +from PIL import Image
            +import torchvision.transforms as transforms
            +import numpy as np
            +import json
            +import requests
            +import matplotlib.pyplot as plt
            +import warnings
            +warnings.filterwarnings('ignore')
            +%matplotlib inline
            +
            +device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
            +print(f'Using {device} for inference')
            +
            + +

            Load the model pretrained on ImageNet dataset.

            + +
            resnet50 = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_resnet50', pretrained=True)
            +utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_convnets_processing_utils')
            +
            +resnet50.eval().to(device)
            +
            + +

            Prepare sample input data.

            + +
            uris = [
            +    'http://images.cocodataset.org/test-stuff2017/000000024309.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000028117.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000006149.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000004954.jpg',
            +]
            +
            +batch = torch.cat(
            +    [utils.prepare_input_from_uri(uri) for uri in uris]
            +).to(device)
            +
            + +

Run inference. Use the pick_n_best(predictions=output, n=topN) helper function to pick the N most probable hypotheses according to the model.

            + +
            with torch.no_grad():
            +    output = torch.nn.functional.softmax(resnet50(batch), dim=1)
            +
            +results = utils.pick_n_best(predictions=output, n=5)
            +
            + +

            Display the result.

            + +
            for uri, result in zip(uris, results):
            +    img = Image.open(requests.get(uri, stream=True).raw)
            +    img.thumbnail((256,256), Image.LANCZOS)
            +    plt.imshow(img)
            +    plt.show()
            +    print(result)
            +
            +
            + +

            Details

            + +

For detailed information on model input and output, training recipes, inference and performance visit: +github +and/or NGC

            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/nvidia_deeplearningexamples_resnext/index.html b/hub/nvidia_deeplearningexamples_resnext/index.html new file mode 100644 index 000000000000..02491be29ecc --- /dev/null +++ b/hub/nvidia_deeplearningexamples_resnext/index.html @@ -0,0 +1,433 @@ + + + + + + + + + + + + + ResNeXt101 | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + ResNeXt101 +

            + +
            +
            +

            By NVIDIA

            +
            + +
            +

            ResNet with bottleneck 3x3 Convolutions substituted by 3x3 Grouped Convolutions, trained with mixed precision using Tensor Cores.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Model Description

            + +

            The ResNeXt101-32x4d is a model introduced in the Aggregated Residual Transformations for Deep Neural Networks paper.

            + +

It is based on the regular ResNet model, substituting the 3x3 convolutions inside the bottleneck block with 3x3 grouped convolutions.

            + +

            This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 3x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.

            + +

            We use NHWC data layout when training using Mixed Precision.

            + +

            Note that the ResNeXt101-32x4d model can be deployed for inference on the NVIDIA Triton Inference Server using TorchScript, ONNX Runtime or TensorRT as an execution backend. For details check NGC

            + +

            Model architecture

            + +

            ResNextArch

            + +

            Image source: Aggregated Residual Transformations for Deep Neural Networks

            + +

The image shows the difference between the ResNet bottleneck block and the ResNeXt bottleneck block.

            + +

The ResNeXt101-32x4d model’s cardinality equals 32 and its bottleneck width equals 4.
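As an illustration of what cardinality 32 and bottleneck width 4 mean in code, here is a sketch of a ResNeXt-style bottleneck built from standard PyTorch layers; the channel sizes are illustrative and this is not the repository's implementation.
import torch
+import torch.nn as nn
+
+width = 32 * 4   # cardinality x bottleneck width = 128 channels in the grouped convolution
+block = nn.Sequential(
+    nn.Conv2d(256, width, kernel_size=1, bias=False), nn.BatchNorm2d(width), nn.ReLU(inplace=True),
+    nn.Conv2d(width, width, kernel_size=3, padding=1, groups=32, bias=False),  # grouped 3x3 convolution
+    nn.BatchNorm2d(width), nn.ReLU(inplace=True),
+    nn.Conv2d(width, 256, kernel_size=1, bias=False), nn.BatchNorm2d(256),
+)
+y = block(torch.randn(1, 256, 56, 56))   # shape preserved: (1, 256, 56, 56); a real block also adds the residual input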

            +

            Example

            + +

            In the example below we will use the pretrained ResNeXt101-32x4d model to perform inference on images and present the result.

            + +

            To run the example you need some extra python packages installed. These are needed for preprocessing images and visualization.

            +
            !pip install validators matplotlib
            +
            + +
            import torch
            +from PIL import Image
            +import torchvision.transforms as transforms
            +import numpy as np
            +import json
            +import requests
            +import matplotlib.pyplot as plt
            +import warnings
            +warnings.filterwarnings('ignore')
            +%matplotlib inline
            +
            +device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
            +print(f'Using {device} for inference')
            +
            + +

            Load the model pretrained on ImageNet dataset.

            +
            resneXt = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_resneXt')
            +utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_convnets_processing_utils')
            +
            +resneXt.eval().to(device)
            +
            + +

            Prepare sample input data.

            +
            uris = [
            +    'http://images.cocodataset.org/test-stuff2017/000000024309.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000028117.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000006149.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000004954.jpg',
            +]
            +
            +
            +batch = torch.cat(
            +    [utils.prepare_input_from_uri(uri) for uri in uris]
            +).to(device)
            +
            + +

Run inference. Use the pick_n_best(predictions=output, n=topN) helper function to pick the N most probable hypotheses according to the model.

            +
            with torch.no_grad():
            +    output = torch.nn.functional.softmax(resneXt(batch), dim=1)
            +    
            +results = utils.pick_n_best(predictions=output, n=5)
            +
            + +

            Display the result.

            +
            for uri, result in zip(uris, results):
            +    img = Image.open(requests.get(uri, stream=True).raw)
+    img.thumbnail((256,256), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
            +    plt.imshow(img)
            +    plt.show()
            +    print(result)
            +
            +
            + +

            Details

            +

For detailed information on model input and output, training recipes, inference and performance visit: +github +and/or NGC

            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/nvidia_deeplearningexamples_se-resnext/index.html b/hub/nvidia_deeplearningexamples_se-resnext/index.html new file mode 100644 index 000000000000..ff70b37b7622 --- /dev/null +++ b/hub/nvidia_deeplearningexamples_se-resnext/index.html @@ -0,0 +1,432 @@ + + + + + + + + + + + + + SE-ResNeXt101 | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + SE-ResNeXt101 +

            + +
            +
            +

            By NVIDIA

            +
            + +
            +

            ResNeXt with Squeeze-and-Excitation module added, trained with mixed precision using Tensor Cores.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Model Description

            + +

The SE-ResNeXt101-32x4d is a ResNeXt101-32x4d +model with an added Squeeze-and-Excitation module, introduced +in the Squeeze-and-Excitation Networks paper.

            + +

            This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 3x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.

            + +

            We use NHWC data layout when training using Mixed Precision.

            + +

            Model architecture

            + +

            SEArch

            + +

            Image source: Squeeze-and-Excitation Networks

            + +

The image shows the architecture of the SE block and where it is placed in the ResNet bottleneck block.
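For reference, a minimal sketch of a Squeeze-and-Excitation block as described in that paper follows; the channel count and reduction ratio are illustrative, not values taken from this checkpoint.
import torch
+import torch.nn as nn
+
+class SEBlock(nn.Module):
+    def __init__(self, channels, reduction=16):
+        super().__init__()
+        self.pool = nn.AdaptiveAvgPool2d(1)                 # squeeze: global average over spatial dims
+        self.fc = nn.Sequential(
+            nn.Linear(channels, channels // reduction), nn.ReLU(inplace=True),
+            nn.Linear(channels // reduction, channels), nn.Sigmoid(),
+        )
+    def forward(self, x):
+        b, c, _, _ = x.shape
+        w = self.fc(self.pool(x).view(b, c)).view(b, c, 1, 1)
+        return x * w                                        # excitation: channel-wise rescaling
+
+se = SEBlock(256)
+out = se(torch.randn(2, 256, 14, 14))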

            + +

            Note that the SE-ResNeXt101-32x4d model can be deployed for inference on the NVIDIA Triton Inference Server using TorchScript, ONNX Runtime or TensorRT as an execution backend. For details check NGC.

            + +

            Example

            + +

            In the example below we will use the pretrained SE-ResNeXt101-32x4d model to perform inference on images and present the result.

            + +

            To run the example you need some extra python packages installed. These are needed for preprocessing images and visualization.

            +
            !pip install validators matplotlib
            +
            + +
            import torch
            +from PIL import Image
            +import torchvision.transforms as transforms
            +import numpy as np
            +import json
            +import requests
            +import matplotlib.pyplot as plt
            +import warnings
            +warnings.filterwarnings('ignore')
            +%matplotlib inline
            +
            +device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
            +print(f'Using {device} for inference')
            +
            + +

            Load the model pretrained on ImageNet dataset.

            +
            resneXt = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_se_resnext101_32x4d')
            +utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_convnets_processing_utils')
            +
            +resneXt.eval().to(device)
            +
            + +

            Prepare sample input data.

            +
            uris = [
            +    'http://images.cocodataset.org/test-stuff2017/000000024309.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000028117.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000006149.jpg',
            +    'http://images.cocodataset.org/test-stuff2017/000000004954.jpg',
            +]
            +
            +
            +batch = torch.cat(
            +    [utils.prepare_input_from_uri(uri) for uri in uris]
            +).to(device)
            +
            + +

            Run inference. Use pick_n_best(predictions=output, n=topN) helper function to pick N most probable hypotheses according to the model.

            +
            with torch.no_grad():
            +    output = torch.nn.functional.softmax(resneXt(batch), dim=1)
            +    
            +results = utils.pick_n_best(predictions=output, n=5)
            +
            + +

            Display the result.

            +
            for uri, result in zip(uris, results):
            +    img = Image.open(requests.get(uri, stream=True).raw)
+    img.thumbnail((256,256), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
            +    plt.imshow(img)
            +    plt.show()
            +    print(result)
            +
            +
            + +

            Details

            +

For detailed information on model input and output, training recipes, inference and performance visit: +github +and/or NGC.

            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/nvidia_deeplearningexamples_ssd/index.html b/hub/nvidia_deeplearningexamples_ssd/index.html new file mode 100644 index 000000000000..dff5cb7d34c4 --- /dev/null +++ b/hub/nvidia_deeplearningexamples_ssd/index.html @@ -0,0 +1,450 @@ + + + + + + + + + + + + + SSD | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + SSD +

            + +
            +
            +

            By NVIDIA

            +
            + +
            +

            Single Shot MultiBox Detector model for object detection

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Model Description

            + +

            This SSD300 model is based on the +SSD: Single Shot MultiBox Detector paper, which +describes SSD as “a method for detecting objects in images using a single deep neural network”. +The input size is fixed to 300x300.

            + +

            The main difference between this model and the one described in the paper is in the backbone. +Specifically, the VGG model is obsolete and is replaced by the ResNet-50 model.

            + +

            From the +Speed/accuracy trade-offs for modern convolutional object detectors +paper, the following enhancements were made to the backbone:

            +
• The conv5_x, avgpool, fc and softmax layers were removed from the original classification model.
• All strides in conv4_x are set to 1x1.
            + +

            The backbone is followed by 5 additional convolutional layers. +In addition to the convolutional layers, we attached 6 detection heads:

            +
• The first detection head is attached to the last conv4_x layer.
• The other five detection heads are attached to the corresponding 5 additional layers.
            + +

Detector heads are similar to the ones referenced in the paper; however, +they are enhanced by additional BatchNorm layers after each convolution.

            + +

            Example

            + +

            In the example below we will use the pretrained SSD model to detect objects in sample images and visualize the result.

            + +

            To run the example you need some extra python packages installed. These are needed for preprocessing images and visualization.

            +
            pip install numpy scipy scikit-image matplotlib
            +
            + +

            Load an SSD model pretrained on COCO dataset, as well as a set of utility methods for convenient and comprehensive formatting of input and output of the model.

            +
            import torch
            +ssd_model = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd')
            +utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd_processing_utils')
            +
            + +

            Now, prepare the loaded model for inference

            +
            ssd_model.to('cuda')
            +ssd_model.eval()
            +
            + +

Prepare input images for object detection. +(The example links below correspond to the first few test images from the COCO dataset, but you can also specify paths to your local images here.)

            +
            uris = [
            +    'http://images.cocodataset.org/val2017/000000397133.jpg',
            +    'http://images.cocodataset.org/val2017/000000037777.jpg',
            +    'http://images.cocodataset.org/val2017/000000252219.jpg'
            +]
            +
            + +

            Format the images to comply with the network input and convert them to tensor.

            +
            inputs = [utils.prepare_input(uri) for uri in uris]
            +tensor = utils.prepare_tensor(inputs)
            +
            + +

            Run the SSD network to perform object detection.

            +
            with torch.no_grad():
            +    detections_batch = ssd_model(tensor)
            +
            + +

            By default, raw output from SSD network per input image contains +8732 boxes with localization and class probability distribution. +Let’s filter this output to only get reasonable detections (confidence>40%) in a more comprehensive format.

            +
            results_per_input = utils.decode_results(detections_batch)
            +best_results_per_input = [utils.pick_best(results, 0.40) for results in results_per_input]
            +
            + +

The model was trained on the COCO dataset, which we need to access in order to translate class IDs into object names. +Downloading the annotations may take a while the first time.

            +
            classes_to_labels = utils.get_coco_object_dictionary()
            +
            + +

            Finally, let’s visualize our detections

            +
            from matplotlib import pyplot as plt
            +import matplotlib.patches as patches
            +
            +for image_idx in range(len(best_results_per_input)):
            +    fig, ax = plt.subplots(1)
            +    # Show original, denormalized image...
            +    image = inputs[image_idx] / 2 + 0.5
            +    ax.imshow(image)
            +    # ...with detections
            +    bboxes, classes, confidences = best_results_per_input[image_idx]
            +    for idx in range(len(bboxes)):
            +        left, bot, right, top = bboxes[idx]
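+        # box coordinates are normalized to [0, 1]; scale by the fixed 300x300 network input size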
            +        x, y, w, h = [val * 300 for val in [left, bot, right - left, top - bot]]
            +        rect = patches.Rectangle((x, y), w, h, linewidth=1, edgecolor='r', facecolor='none')
            +        ax.add_patch(rect)
            +        ax.text(x, y, "{} {:.0f}%".format(classes_to_labels[classes[idx] - 1], confidences[idx]*100), bbox=dict(facecolor='white', alpha=0.5))
            +plt.show()
            +
            + +

            Details

            +

For detailed information on model input and output, +training recipes, inference and performance visit: +github +and/or NGC

            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/nvidia_deeplearningexamples_tacotron2/index.html b/hub/nvidia_deeplearningexamples_tacotron2/index.html new file mode 100644 index 000000000000..5252618f5542 --- /dev/null +++ b/hub/nvidia_deeplearningexamples_tacotron2/index.html @@ -0,0 +1,413 @@ + + + + + + + + + + + + + Tacotron 2 | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + Tacotron 2 +

            + +
            +
            +

            By NVIDIA

            +
            + +
            +

            The Tacotron 2 model for generating mel spectrograms from text

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Model Description

            + +

The Tacotron 2 and WaveGlow models form a text-to-speech system that enables users to synthesize natural-sounding speech from raw transcripts without any additional prosody information. The Tacotron 2 model produces mel spectrograms from input text using an encoder-decoder architecture. WaveGlow (also available via torch.hub) is a flow-based model that consumes the mel spectrograms to generate speech.

            + +

This implementation of the Tacotron 2 model differs from the model described in the paper. Our implementation uses Dropout instead of Zoneout to regularize the LSTM layers.

            + +

            Example

            + +

            In the example below:

            +
• pretrained Tacotron2 and Waveglow models are loaded from torch.hub
• Given a tensor representation of the input text (“Hello world, I missed you so much”), Tacotron2 generates a Mel spectrogram as shown on the illustration
• Waveglow generates sound given the mel spectrogram
• the output sound is saved in an ‘audio.wav’ file
            + +

To run the example you need some extra Python packages installed. These are needed for preprocessing the text and audio, as well as for display and input/output.

            +
pip install numpy scipy librosa unidecode inflect
            +apt-get update
            +apt-get install -y libsndfile1
            +
            + +

Load the Tacotron2 model pre-trained on the LJ Speech dataset and prepare it for inference:

            +
            import torch
            +tacotron2 = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_tacotron2', model_math='fp16')
            +tacotron2 = tacotron2.to('cuda')
            +tacotron2.eval()
            +
            + +

            Load pretrained WaveGlow model

            +
            waveglow = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_waveglow', model_math='fp16')
            +waveglow = waveglow.remove_weightnorm(waveglow)
            +waveglow = waveglow.to('cuda')
            +waveglow.eval()
            +
            + +

            Now, let’s make the model say:

            +
            text = "Hello world, I missed you so much."
            +
            + +

            Format the input using utility methods

            +
            utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_tts_utils')
            +sequences, lengths = utils.prepare_input_sequence([text])
            +
            + +

            Run the chained models:

            +
            with torch.no_grad():
            +    mel, _, _ = tacotron2.infer(sequences, lengths)
            +    audio = waveglow.infer(mel)
            +audio_numpy = audio[0].data.cpu().numpy()
            +rate = 22050
            +
            + +
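The bullet list above mentions an illustration of the generated mel spectrogram. A minimal sketch to visualize it, assuming matplotlib is installed and reusing the mel tensor produced by tacotron2.infer above (the plot layout is illustrative, not part of the original page):

import matplotlib.pyplot as plt

# mel has shape (batch, n_mel_channels, frames); plot the first item in the batch
plt.imshow(mel[0].float().cpu().numpy(), aspect='auto', origin='lower')
plt.xlabel('Frames')
plt.ylabel('Mel channels')
plt.title('Mel spectrogram generated by Tacotron 2')
plt.show()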

            You can write it to a file and listen to it

            +
            from scipy.io.wavfile import write
            +write("audio.wav", rate, audio_numpy)
            +
            + +

            Alternatively, play it right away in a notebook with IPython widgets

            +
            from IPython.display import Audio
            +Audio(audio_numpy, rate=rate)
            +
            + +

            Details

            +

For detailed information on model input and output, training recipes, inference and performance visit: github and/or NGC

            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

diff --git a/hub/nvidia_deeplearningexamples_waveglow/index.html b/hub/nvidia_deeplearningexamples_waveglow/index.html new file mode 100644 index 000000000000..2f7b1084a86b --- /dev/null +++ b/hub/nvidia_deeplearningexamples_waveglow/index.html @@ -0,0 +1,414 @@
            + WaveGlow +

            + +
            +
            +

            By NVIDIA

            +
            + +
            +

            WaveGlow model for generating speech from mel spectrograms (generated by Tacotron2)

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Model Description

            + +

The Tacotron 2 and WaveGlow models form a text-to-speech system that enables users to synthesize natural-sounding speech from raw transcripts without any additional prosody information. The Tacotron 2 model (also available via torch.hub) produces mel spectrograms from input text using an encoder-decoder architecture. WaveGlow is a flow-based model that consumes the mel spectrograms to generate speech.

            + +

            Example

            + +

            In the example below:

            +
              +
• pretrained Tacotron2 and WaveGlow models are loaded from torch.hub
• given a tensor representation of the input text (“Hello world, I missed you so much”), Tacotron2 generates a mel spectrogram as shown on the illustration
• WaveGlow generates sound given the mel spectrogram
• the output sound is saved in an ‘audio.wav’ file
            + +

To run the example you need some extra Python packages installed. These are needed for preprocessing the text and audio, as well as for display and input/output.

            +
pip install numpy scipy librosa unidecode inflect
            +apt-get update
            +apt-get install -y libsndfile1
            +
            + +

Load the WaveGlow model pre-trained on the LJ Speech dataset

            +
            import torch
            +waveglow = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_waveglow', model_math='fp32')
            +
            + +

            Prepare the WaveGlow model for inference

            +
            waveglow = waveglow.remove_weightnorm(waveglow)
            +waveglow = waveglow.to('cuda')
            +waveglow.eval()
            +
            + +

            Load a pretrained Tacotron2 model

            +
            tacotron2 = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_tacotron2', model_math='fp32')
            +tacotron2 = tacotron2.to('cuda')
            +tacotron2.eval()
            +
            + +

            Now, let’s make the model say:

            +
            text = "hello world, I missed you so much"
            +
            + +

            Format the input using utility methods

            +
            utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_tts_utils')
            +sequences, lengths = utils.prepare_input_sequence([text])
            +
            + +

            Run the chained models

            +
            with torch.no_grad():
            +    mel, _, _ = tacotron2.infer(sequences, lengths)
            +    audio = waveglow.infer(mel)
            +audio_numpy = audio[0].data.cpu().numpy()
            +rate = 22050
            +
            + +

            You can write it to a file and listen to it

            +
            from scipy.io.wavfile import write
            +write("audio.wav", rate, audio_numpy)
            +
            + +

            Alternatively, play it right away in a notebook with IPython widgets

            +
            from IPython.display import Audio
            +Audio(audio_numpy, rate=rate)
            +
            + +

            Details

            +

For detailed information on model input and output, training recipes, inference and performance visit: github and/or NGC

            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

diff --git a/hub/pytorch_fairseq_roberta/index.html b/hub/pytorch_fairseq_roberta/index.html new file mode 100644 index 000000000000..fe4bde689867 --- /dev/null +++ b/hub/pytorch_fairseq_roberta/index.html @@ -0,0 +1,415 @@
            + RoBERTa +

            + +
            +
            +

            By Facebook AI (fairseq Team)

            +
            + +
            +

            A Robustly Optimized BERT Pretraining Approach

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Model Description

            + +

Bidirectional Encoder Representations from Transformers, or BERT, is a revolutionary self-supervised pretraining technique that learns to predict intentionally hidden (masked) sections of text. Crucially, the representations learned by BERT have been shown to generalize well to downstream tasks, and when BERT was first released in 2018 it achieved state-of-the-art results on many NLP benchmark datasets.

            + +

RoBERTa builds on BERT’s language masking strategy and modifies key hyperparameters in BERT, including removing BERT’s next-sentence pretraining objective, and training with much larger mini-batches and learning rates. RoBERTa was also trained on an order of magnitude more data than BERT, for a longer amount of time. This allows RoBERTa representations to generalize even better to downstream tasks compared to BERT.

            + +

            Requirements

            + +

            We require a few additional Python dependencies for preprocessing:

            + +
            pip install regex requests hydra-core omegaconf
            +
            + +

            Example

            + +
            Load RoBERTa
            +
            import torch
            +roberta = torch.hub.load('pytorch/fairseq', 'roberta.large')
            +roberta.eval()  # disable dropout (or leave in train mode to finetune)
            +
            + +
            Apply Byte-Pair Encoding (BPE) to input text
            +
            tokens = roberta.encode('Hello world!')
            +assert tokens.tolist() == [0, 31414, 232, 328, 2]
            +assert roberta.decode(tokens) == 'Hello world!'
            +
            + +
            Extract features from RoBERTa
            +
            # Extract the last layer's features
            +last_layer_features = roberta.extract_features(tokens)
            +assert last_layer_features.size() == torch.Size([1, 5, 1024])
            +
            +# Extract all layer's features (layer 0 is the embedding layer)
            +all_layers = roberta.extract_features(tokens, return_all_hiddens=True)
            +assert len(all_layers) == 25
            +assert torch.all(all_layers[-1] == last_layer_features)
            +
            + +
            Use RoBERTa for sentence-pair classification tasks
            +
            # Download RoBERTa already finetuned for MNLI
            +roberta = torch.hub.load('pytorch/fairseq', 'roberta.large.mnli')
            +roberta.eval()  # disable dropout for evaluation
            +
            +with torch.no_grad():
            +    # Encode a pair of sentences and make a prediction
            +    tokens = roberta.encode('Roberta is a heavily optimized version of BERT.', 'Roberta is not very optimized.')
            +    prediction = roberta.predict('mnli', tokens).argmax().item()
            +    assert prediction == 0  # contradiction
            +
            +    # Encode another pair of sentences
            +    tokens = roberta.encode('Roberta is a heavily optimized version of BERT.', 'Roberta is based on BERT.')
            +    prediction = roberta.predict('mnli', tokens).argmax().item()
            +    assert prediction == 2  # entailment
            +
            + +
            Register a new (randomly initialized) classification head
            +
            roberta.register_classification_head('new_task', num_classes=3)
            +logprobs = roberta.predict('new_task', tokens)  # tensor([[-1.1050, -1.0672, -1.1245]], grad_fn=<LogSoftmaxBackward>)
            +
            + +
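Beyond classification, the pretrained language-modeling head can fill in masked tokens. A minimal sketch, assuming the fairseq hub interface exposes a fill_mask helper that returns (filled text, score, predicted token) tuples; the exact return format may vary across fairseq versions:

# Reload the base pretrained checkpoint (the MNLI model above replaces the LM head)
roberta = torch.hub.load('pytorch/fairseq', 'roberta.large')
roberta.eval()

# Predict the top candidates for the masked position
for filled, score, token in roberta.fill_mask('RoBERTa is a heavily optimized version of <mask>.', topk=3):
    print(f'{token.strip():>12s}  {score:.3f}  {filled}')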

            References

            + + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

diff --git a/hub/pytorch_fairseq_translation/index.html b/hub/pytorch_fairseq_translation/index.html new file mode 100644 index 000000000000..99a66c283303 --- /dev/null +++ b/hub/pytorch_fairseq_translation/index.html @@ -0,0 +1,449 @@
            + Transformer (NMT) +

            + +
            +
            +

            By Facebook AI (fairseq Team)

            +
            + +
            +

            Transformer models for English-French and English-German translation.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Model Description

            + +

The Transformer, introduced in the paper Attention Is All You Need, is a powerful sequence-to-sequence modeling architecture capable of producing state-of-the-art neural machine translation (NMT) systems.

            + +

Recently, the fairseq team has explored large-scale semi-supervised training of Transformers using back-translated data, further improving translation quality over the original model. More details can be found in this blog post.

            + +

            Requirements

            + +

            We require a few additional Python dependencies for preprocessing:

            + +
            pip install bitarray fastBPE hydra-core omegaconf regex requests sacremoses subword_nmt
            +
            + +
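Before loading a specific checkpoint in the sections below, you can enumerate the entry points that fairseq exposes through torch.hub. A small sketch (the exact names returned depend on the fairseq revision that torch.hub fetches):

import torch

# Print hub entry points such as 'transformer.wmt14.en-fr' or 'transformer.wmt19.en-de.single_model'
for name in torch.hub.list('pytorch/fairseq'):
    if name.startswith('transformer'):
        print(name)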

            English-to-French Translation

            + +

To translate from English to French using the model from the paper Scaling Neural Machine Translation:

            + +
            import torch
            +
            +# Load an En-Fr Transformer model trained on WMT'14 data :
            +en2fr = torch.hub.load('pytorch/fairseq', 'transformer.wmt14.en-fr', tokenizer='moses', bpe='subword_nmt')
            +
            +# Use the GPU (optional):
            +en2fr.cuda()
            +
            +# Translate with beam search:
            +fr = en2fr.translate('Hello world!', beam=5)
            +assert fr == 'Bonjour Ă  tous !'
            +
            +# Manually tokenize:
            +en_toks = en2fr.tokenize('Hello world!')
            +assert en_toks == 'Hello world !'
            +
            +# Manually apply BPE:
            +en_bpe = en2fr.apply_bpe(en_toks)
            +assert en_bpe == 'H@@ ello world !'
            +
            +# Manually binarize:
            +en_bin = en2fr.binarize(en_bpe)
            +assert en_bin.tolist() == [329, 14044, 682, 812, 2]
            +
            +# Generate five translations with top-k sampling:
            +fr_bin = en2fr.generate(en_bin, beam=5, sampling=True, sampling_topk=20)
            +assert len(fr_bin) == 5
            +
            +# Convert one of the samples to a string and detokenize
            +fr_sample = fr_bin[0]['tokens']
            +fr_bpe = en2fr.string(fr_sample)
            +fr_toks = en2fr.remove_bpe(fr_bpe)
            +fr = en2fr.detokenize(fr_toks)
            +assert fr == en2fr.decode(fr_sample)
            +
            + +

            English-to-German Translation

            + +

Semi-supervised training with back-translation is an effective way of improving translation systems. In the paper Understanding Back-Translation at Scale, we back-translate over 200 million German sentences to use as additional training data. An ensemble of five of these models was the winning submission to the WMT’18 English-German news translation competition.

            + +

We can further improve this approach through noisy-channel reranking. More details can be found in this blog post. An ensemble of models trained with this technique was the winning submission to the WMT’19 English-German news translation competition.

            + +

            To translate from English to German using one of the models from the winning submission:

            + +
            import torch
            +
            +# Load an En-De Transformer model trained on WMT'19 data:
            +en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-de.single_model', tokenizer='moses', bpe='fastbpe')
            +
            +# Access the underlying TransformerModel
            +assert isinstance(en2de.models[0], torch.nn.Module)
            +
            +# Translate from En-De
            +de = en2de.translate('PyTorch Hub is a pre-trained model repository designed to facilitate research reproducibility.')
            +assert de == 'PyTorch Hub ist ein vorgefertigtes Modell-Repository, das die Reproduzierbarkeit der Forschung erleichtern soll.'
            +
            + +

            We can also do a round-trip translation to create a paraphrase:

            +
            # Round-trip translations between English and German:
            +en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-de.single_model', tokenizer='moses', bpe='fastbpe')
            +de2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.de-en.single_model', tokenizer='moses', bpe='fastbpe')
            +
            +paraphrase = de2en.translate(en2de.translate('PyTorch Hub is an awesome interface!'))
            +assert paraphrase == 'PyTorch Hub is a fantastic interface!'
            +
            +# Compare the results with English-Russian round-trip translation:
            +en2ru = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-ru.single_model', tokenizer='moses', bpe='fastbpe')
            +ru2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.ru-en.single_model', tokenizer='moses', bpe='fastbpe')
            +
            +paraphrase = ru2en.translate(en2ru.translate('PyTorch Hub is an awesome interface!'))
            +assert paraphrase == 'PyTorch is a great interface!'
            +
            + +

            References

            + + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

diff --git a/hub/pytorch_vision_alexnet/index.html b/hub/pytorch_vision_alexnet/index.html new file mode 100644 index 000000000000..721e3d9f5b17 --- /dev/null +++ b/hub/pytorch_vision_alexnet/index.html @@ -0,0 +1,434 @@
            + AlexNet +

            + +
            +
            +

            By Pytorch Team

            +
            + +
            +

            The 2012 ImageNet winner achieved a top-5 error of 15.3%, more than 10.8 percentage points lower than that of the runner up.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('pytorch/vision:v0.10.0', 'alexnet', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

            AlexNet competed in the ImageNet Large Scale Visual Recognition Challenge on September 30, 2012. The network achieved a top-5 error of 15.3%, more than 10.8 percentage points lower than that of the runner up. The original paper’s primary result was that the depth of the model was essential for its high performance, which was computationally expensive, but made feasible due to the utilization of graphics processing units (GPUs) during training.

            + +

            The 1-crop error rates on the ImageNet dataset with the pretrained model are listed below.

            + + + + + + + + + + + + + + + + +
| Model structure | Top-1 error | Top-5 error |
| --------------- | ----------- | ----------- |
| AlexNet         | 43.45       | 20.91       |
            + +

            References

            + +
              +
            1. One weird trick for parallelizing convolutional neural networks.
            2. +
            + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

diff --git a/hub/pytorch_vision_deeplabv3_resnet101/index.html b/hub/pytorch_vision_deeplabv3_resnet101/index.html new file mode 100644 index 000000000000..469d9b203052 --- /dev/null +++ b/hub/pytorch_vision_deeplabv3_resnet101/index.html @@ -0,0 +1,462 @@
            + Deeplabv3 +

            + +
            +
            +

            By Pytorch Team

            +
            + +
            +

            DeepLabV3 models with ResNet-50, ResNet-101 and MobileNet-V3 backbones

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_resnet50', pretrained=True)
            +# or any of these variants
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_resnet101', pretrained=True)
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_mobilenet_v3_large', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (N, 3, H, W), where N is the number of images, and H and W are expected to be at least 224 pixels. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

The model returns an OrderedDict with two Tensors that are of the same height and width as the input Tensor, but with 21 classes. output['out'] contains the semantic masks, and output['aux'] contains the auxiliary loss values per-pixel. In inference mode, output['aux'] is not useful. So, output['out'] is of shape (N, 21, H, W). More documentation can be found here.

            + +
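To see this structure concretely, you can run the model on a random input and inspect the returned dictionary. This is a quick sketch that assumes the model loaded above is already in eval mode; the 224x224 input size is arbitrary:

# Feed a random 3-channel image and inspect the returned OrderedDict
with torch.no_grad():
    out = model(torch.rand(1, 3, 224, 224))
print(list(out.keys()))   # ['out', 'aux']
print(out['out'].shape)   # torch.Size([1, 21, 224, 224])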
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/deeplab1.png", "deeplab1.png")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +input_image = input_image.convert("RGB")
            +preprocess = transforms.Compose([
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)['out'][0]
            +output_predictions = output.argmax(0)
            +
            + +

The output here is of shape (21, H, W), and at each location, there are unnormalized probabilities corresponding to the prediction of each class. To get the maximum prediction of each class, and then use it for a downstream task, you can do output_predictions = output.argmax(0).

            + +

            Here’s a small snippet that plots the predictions, with each color being assigned to each class (see the visualized image on the left).

            + +
            # create a color pallette, selecting a color for each class
            +palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
            +colors = torch.as_tensor([i for i in range(21)])[:, None] * palette
            +colors = (colors % 255).numpy().astype("uint8")
            +
            +# plot the semantic segmentation predictions of 21 classes in each color
            +r = Image.fromarray(output_predictions.byte().cpu().numpy()).resize(input_image.size)
            +r.putpalette(colors)
            +
            +import matplotlib.pyplot as plt
            +plt.imshow(r)
            +# plt.show()
            +
            + +

            Model Description

            + +

Deeplabv3-ResNet is constructed by a Deeplabv3 model using a ResNet-50 or ResNet-101 backbone. Deeplabv3-MobileNetV3-Large is constructed by a Deeplabv3 model using the MobileNetV3 large backbone. The pre-trained models have been trained on a subset of COCO train2017, on the 20 categories that are present in the Pascal VOC dataset.

            + +

The accuracies of the pre-trained models evaluated on the COCO val2017 dataset are listed below.

            + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Model structure              | Mean IOU | Global Pixelwise Accuracy |
| ---------------------------- | -------- | ------------------------- |
| deeplabv3_resnet50           | 66.4     | 92.4                      |
| deeplabv3_resnet101          | 67.4     | 92.4                      |
| deeplabv3_mobilenet_v3_large | 60.3     | 91.2                      |
            + +

            Resources

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

diff --git a/hub/pytorch_vision_densenet/index.html b/hub/pytorch_vision_densenet/index.html new file mode 100644 index 000000000000..44f83483a11f --- /dev/null +++ b/hub/pytorch_vision_densenet/index.html @@ -0,0 +1,465 @@
            + Densenet +

            + +
            +
            +

            By Pytorch Team

            +
            + +
            +

Dense Convolutional Network (DenseNet) connects each layer to every other layer in a feed-forward fashion.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('pytorch/vision:v0.10.0', 'densenet121', pretrained=True)
            +# or any of these variants
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'densenet169', pretrained=True)
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'densenet201', pretrained=True)
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'densenet161', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

Dense Convolutional Network (DenseNet) connects each layer to every other layer in a feed-forward fashion. Whereas traditional convolutional networks with L layers have L connections - one between each layer and its subsequent layer - our network has L(L+1)/2 direct connections. For each layer, the feature-maps of all preceding layers are used as inputs, and its own feature-maps are used as inputs into all subsequent layers. DenseNets have several compelling advantages: they alleviate the vanishing-gradient problem, strengthen feature propagation, encourage feature reuse, and substantially reduce the number of parameters.

            + +
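To make the connectivity pattern concrete, here is a minimal sketch of a dense block; it is an illustration only, not the torchvision implementation, and the channel counts are arbitrary:

import torch
import torch.nn as nn

class TinyDenseBlock(nn.Module):
    """Each layer receives the concatenated feature-maps of all preceding layers."""
    def __init__(self, in_channels, growth_rate, num_layers):
        super().__init__()
        self.layers = nn.ModuleList([
            nn.Sequential(
                nn.BatchNorm2d(in_channels + i * growth_rate),
                nn.ReLU(inplace=True),
                nn.Conv2d(in_channels + i * growth_rate, growth_rate, kernel_size=3, padding=1),
            )
            for i in range(num_layers)
        ])

    def forward(self, x):
        features = [x]
        for layer in self.layers:
            features.append(layer(torch.cat(features, dim=1)))
        return torch.cat(features, dim=1)

block = TinyDenseBlock(in_channels=16, growth_rate=8, num_layers=4)
print(block(torch.rand(1, 16, 32, 32)).shape)  # torch.Size([1, 48, 32, 32])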

            The 1-crop error rates on the ImageNet dataset with the pretrained model are listed below.

            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Model structure | Top-1 error | Top-5 error |
| --------------- | ----------- | ----------- |
| densenet121     | 25.35       | 7.83        |
| densenet169     | 24.00       | 7.00        |
| densenet201     | 22.80       | 6.43        |
| densenet161     | 22.35       | 6.20        |
            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

diff --git a/hub/pytorch_vision_fcn_resnet101/index.html b/hub/pytorch_vision_fcn_resnet101/index.html new file mode 100644 index 000000000000..a6595a3e8d11 --- /dev/null +++ b/hub/pytorch_vision_fcn_resnet101/index.html @@ -0,0 +1,452 @@
            + FCN +

            + +
            +
            +

            By Pytorch Team

            +
            + +
            +

            Fully-Convolutional Network model with ResNet-50 and ResNet-101 backbones

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('pytorch/vision:v0.10.0', 'fcn_resnet50', pretrained=True)
            +# or
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'fcn_resnet101', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (N, 3, H, W), where N is the number of images, and H and W are expected to be at least 224 pixels. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

The model returns an OrderedDict with two Tensors that are of the same height and width as the input Tensor, but with 21 classes. output['out'] contains the semantic masks, and output['aux'] contains the auxiliary loss values per-pixel. In inference mode, output['aux'] is not useful. So, output['out'] is of shape (N, 21, H, W). More documentation can be found here.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/deeplab1.png", "deeplab1.png")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +input_image = input_image.convert("RGB")
            +preprocess = transforms.Compose([
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)['out'][0]
            +output_predictions = output.argmax(0)
            +
            + +

The output here is of shape (21, H, W), and at each location, there are unnormalized probabilities corresponding to the prediction of each class. To get the maximum prediction of each class, and then use it for a downstream task, you can do output_predictions = output.argmax(0).

            + +

            Here’s a small snippet that plots the predictions, with each color being assigned to each class (see the visualized image on the left).

            + +
            # create a color pallette, selecting a color for each class
            +palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
            +colors = torch.as_tensor([i for i in range(21)])[:, None] * palette
            +colors = (colors % 255).numpy().astype("uint8")
            +
            +# plot the semantic segmentation predictions of 21 classes in each color
            +r = Image.fromarray(output_predictions.byte().cpu().numpy()).resize(input_image.size)
            +r.putpalette(colors)
            +
            +import matplotlib.pyplot as plt
            +plt.imshow(r)
            +# plt.show()
            +
            + +

            Model Description

            + +

FCN-ResNet is constructed by a Fully-Convolutional Network model, using a ResNet-50 or a ResNet-101 backbone. The pre-trained models have been trained on a subset of COCO train2017, on the 20 categories that are present in the Pascal VOC dataset.

            + +

The accuracies of the pre-trained models evaluated on the COCO val2017 dataset are listed below.

            + + + + + + + + + + + + + + + + + + + + + +
| Model structure | Mean IOU | Global Pixelwise Accuracy |
| --------------- | -------- | ------------------------- |
| fcn_resnet50    | 60.5     | 91.4                      |
| fcn_resnet101   | 63.7     | 91.9                      |
            + +

            Resources

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

diff --git a/hub/pytorch_vision_ghostnet/index.html b/hub/pytorch_vision_ghostnet/index.html new file mode 100644 index 000000000000..e58e64083ceb --- /dev/null +++ b/hub/pytorch_vision_ghostnet/index.html @@ -0,0 +1,444 @@
            + GhostNet +

            + +
            +
            +

            By Huawei Noah's Ark Lab

            +
            + +
            +

            Efficient networks by generating more features from cheap operations

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('huawei-noah/ghostnet', 'ghostnet_1x', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

The GhostNet architecture is based on a Ghost module structure which generates more features from cheap operations. Based on a set of intrinsic feature maps, a series of cheap operations are applied to generate many ghost feature maps that fully reveal the information underlying the intrinsic features. Experiments conducted on benchmarks demonstrate the superiority of GhostNet in terms of the speed-accuracy tradeoff.

            + +
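As an illustration of the idea (a sketch, not the authors' official implementation), a minimal Ghost module: a small ordinary convolution produces the intrinsic feature maps, and a cheap depthwise convolution generates additional "ghost" maps that are concatenated with them. Channel counts below are arbitrary:

import torch
import torch.nn as nn

class TinyGhostModule(nn.Module):
    """Half of the output channels come from a regular conv, half from a cheap depthwise conv."""
    def __init__(self, in_channels, out_channels):
        super().__init__()
        intrinsic = out_channels // 2
        self.primary = nn.Sequential(
            nn.Conv2d(in_channels, intrinsic, kernel_size=1, bias=False),
            nn.BatchNorm2d(intrinsic),
            nn.ReLU(inplace=True),
        )
        self.cheap = nn.Sequential(
            nn.Conv2d(intrinsic, out_channels - intrinsic, kernel_size=3,
                      padding=1, groups=intrinsic, bias=False),
            nn.BatchNorm2d(out_channels - intrinsic),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        primary_maps = self.primary(x)
        ghost_maps = self.cheap(primary_maps)
        return torch.cat([primary_maps, ghost_maps], dim=1)

module = TinyGhostModule(in_channels=16, out_channels=32)
print(module(torch.rand(1, 16, 56, 56)).shape)  # torch.Size([1, 32, 56, 56])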

The corresponding accuracy on the ImageNet dataset with the pretrained model is listed below.

            + + + + + + + + + + + + + + + + + + +
| Model structure | FLOPs | Top-1 acc | Top-5 acc |
| --------------- | ----- | --------- | --------- |
| GhostNet 1.0x   | 142M  | 73.98     | 91.46     |
            + +

            References

            + +

            You can read the full paper at this link.

            + +
            +

@inproceedings{han2019ghostnet,
  title={GhostNet: More Features from Cheap Operations},
  author={Kai Han and Yunhe Wang and Qi Tian and Jianyuan Guo and Chunjing Xu and Chang Xu},
  booktitle={CVPR},
  year={2020},
}

            +
            + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

diff --git a/hub/pytorch_vision_googlenet/index.html b/hub/pytorch_vision_googlenet/index.html new file mode 100644 index 000000000000..c65e8e9a2a12 --- /dev/null +++ b/hub/pytorch_vision_googlenet/index.html @@ -0,0 +1,432 @@
            + GoogLeNet +

            + +
            +
            +

            By Pytorch Team

            +
            + +
            +

            GoogLeNet was based on a deep convolutional neural network architecture codenamed "Inception" which won ImageNet 2014.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('pytorch/vision:v0.10.0', 'googlenet', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

GoogLeNet was based on a deep convolutional neural network architecture codenamed “Inception”, which was responsible for setting the new state of the art for classification and detection in the ImageNet Large-Scale Visual Recognition Challenge 2014 (ILSVRC 2014). The 1-crop error rates on the ImageNet dataset with a pretrained model are listed below.

            + + + + + + + + + + + + + + + + +
| Model structure | Top-1 error | Top-5 error |
| --------------- | ----------- | ----------- |
| googlenet       | 30.22       | 10.47       |
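To make the “Inception” idea concrete, below is a minimal, hypothetical sketch of an Inception-style block: several convolutional branches with different kernel sizes run in parallel and their outputs are concatenated along the channel dimension. The class name and channel numbers are illustrative only; this is not the exact module used inside torchvision's `googlenet`.

```python
# Hypothetical sketch of an Inception-style block (illustration only).
import torch
import torch.nn as nn

class InceptionBlockSketch(nn.Module):
    def __init__(self, in_ch, c1x1, c3x3_reduce, c3x3, c5x5_reduce, c5x5, pool_proj):
        super().__init__()
        self.branch1 = nn.Conv2d(in_ch, c1x1, kernel_size=1)
        self.branch2 = nn.Sequential(
            nn.Conv2d(in_ch, c3x3_reduce, kernel_size=1), nn.ReLU(inplace=True),
            nn.Conv2d(c3x3_reduce, c3x3, kernel_size=3, padding=1),
        )
        self.branch3 = nn.Sequential(
            nn.Conv2d(in_ch, c5x5_reduce, kernel_size=1), nn.ReLU(inplace=True),
            nn.Conv2d(c5x5_reduce, c5x5, kernel_size=5, padding=2),
        )
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            nn.Conv2d(in_ch, pool_proj, kernel_size=1),
        )

    def forward(self, x):
        # every branch keeps the spatial size; outputs are concatenated on channels
        return torch.cat([self.branch1(x), self.branch2(x),
                          self.branch3(x), self.branch4(x)], dim=1)

block = InceptionBlockSketch(192, 64, 96, 128, 16, 32, 32)
print(block(torch.randn(1, 192, 28, 28)).shape)  # torch.Size([1, 256, 28, 28])
```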
            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/pytorch_vision_hardnet/index.html b/hub/pytorch_vision_hardnet/index.html new file mode 100644 index 000000000000..5ce7dedb72d1 --- /dev/null +++ b/hub/pytorch_vision_hardnet/index.html @@ -0,0 +1,471 @@ + + + + + + + + + + + + + HarDNet | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + HarDNet +

            + +
            +
            +

            By PingoLH

            +
            + +
            +

            Harmonic DenseNet pre-trained on ImageNet

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('PingoLH/Pytorch-HarDNet', 'hardnet68', pretrained=True)
            +# or any of these variants
            +# model = torch.hub.load('PingoLH/Pytorch-HarDNet', 'hardnet85', pretrained=True)
            +# model = torch.hub.load('PingoLH/Pytorch-HarDNet', 'hardnet68ds', pretrained=True)
            +# model = torch.hub.load('PingoLH/Pytorch-HarDNet', 'hardnet39ds', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

Harmonic DenseNet (HarDNet) is a low memory traffic CNN model that is fast and efficient. The basic concept is to minimize both computational cost and memory access cost at the same time, so that HarDNet models run about 35% faster on GPU than ResNet models of the same accuracy (except the two DS models, which were designed for comparison with MobileNet).
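The speed claim can be sanity-checked with a rough timing loop such as the hypothetical sketch below; absolute numbers depend heavily on the GPU, batch size, input resolution, and cuDNN settings, so treat it only as an illustration of how one might compare the two models.

```python
# Rough, illustrative GPU timing sketch (assumes a CUDA device is available).
import time
import torch

hardnet = torch.hub.load('PingoLH/Pytorch-HarDNet', 'hardnet68', pretrained=True).eval().to('cuda')
resnet50 = torch.hub.load('pytorch/vision:v0.10.0', 'resnet50', pretrained=True).eval().to('cuda')
x = torch.randn(16, 3, 224, 224, device='cuda')

def time_model(model, x, iters=50):
    with torch.no_grad():
        for _ in range(10):            # warm-up
            model(x)
        torch.cuda.synchronize()
        start = time.time()
        for _ in range(iters):
            model(x)
        torch.cuda.synchronize()
    return (time.time() - start) / iters

print('hardnet68 :', time_model(hardnet, x), 's / batch')
print('resnet50  :', time_model(resnet50, x), 's / batch')
```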

            + +

Here we have the 4 versions of HarDNet models, which contain 39, 68, or 85 layers, with or without Depthwise Separable Convolutions. Their 1-crop error rates on the ImageNet dataset with pretrained models are listed below.

            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Model structure | Top-1 error | Top-5 error |
| --------------- | ----------- | ----------- |
| hardnet39ds     | 27.92       | 9.57        |
| hardnet68ds     | 25.71       | 8.13        |
| hardnet68       | 23.52       | 6.99        |
| hardnet85       | 21.96       | 6.11        |
            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/pytorch_vision_ibnnet/index.html b/hub/pytorch_vision_ibnnet/index.html new file mode 100644 index 000000000000..9f4c68558ce0 --- /dev/null +++ b/hub/pytorch_vision_ibnnet/index.html @@ -0,0 +1,501 @@ + + + + + + + + + + + + + IBN-Net | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + IBN-Net +

            + +
            +
            +

            By Xingang Pan

            +
            + +
            +

            Networks with domain/appearance invariance

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('XingangPan/IBN-Net', 'resnet50_ibn_a', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

IBN-Net is a CNN model with domain/appearance invariance. Motivated by work on style transfer, IBN-Net carefully unifies instance normalization and batch normalization in a single deep network. It provides a simple way to increase both modeling and generalization capacity without adding model complexity. IBN-Net is especially suitable for cross-domain or person/vehicle re-identification tasks.
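As a rough illustration of the idea (a simplified, hypothetical sketch, not the exact module or placement used in the repository), an IBN-style layer can be written as instance normalization on one part of the channels and batch normalization on the rest:

```python
# Hypothetical sketch of an IBN-style normalization layer: instance norm on the
# first half of the channels, batch norm on the second half.
import torch
import torch.nn as nn

class IBNSketch(nn.Module):
    def __init__(self, planes, ratio=0.5):
        super().__init__()
        self.half = int(planes * ratio)
        self.IN = nn.InstanceNorm2d(self.half, affine=True)
        self.BN = nn.BatchNorm2d(planes - self.half)

    def forward(self, x):
        first, rest = torch.split(x, [self.half, x.size(1) - self.half], dim=1)
        return torch.cat([self.IN(first), self.BN(rest)], dim=1)

layer = IBNSketch(64)
print(layer(torch.randn(2, 64, 56, 56)).shape)  # torch.Size([2, 64, 56, 56])
```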

            + +

The corresponding accuracies on the ImageNet dataset with pretrained models are listed below.

            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Model name          | Top-1 acc | Top-5 acc |
| ------------------- | --------- | --------- |
| resnet50_ibn_a      | 77.46     | 93.68     |
| resnet101_ibn_a     | 78.61     | 94.41     |
| resnext101_ibn_a    | 79.12     | 94.58     |
| se_resnet101_ibn_a  | 78.75     | 94.49     |
            + +

The rank-1 accuracy / mAP on two Re-ID benchmarks, Market1501 and DukeMTMC-reID, is listed below (results from michuanhaohao/reid-strong-baseline); values are shown as rank-1 (mAP).

            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Backbone        | Market1501  | DukeMTMC-reID |
| --------------- | ----------- | ------------- |
| ResNet50        | 94.5 (85.9) | 86.4 (76.4)   |
| ResNet101       | 94.5 (87.1) | 87.6 (77.6)   |
| SeResNet50      | 94.4 (86.3) | 86.4 (76.5)   |
| SeResNet101     | 94.6 (87.3) | 87.5 (78.0)   |
| SeResNeXt50     | 94.9 (87.6) | 88.0 (78.3)   |
| SeResNeXt101    | 95.0 (88.0) | 88.4 (79.0)   |
| ResNet50-IBN-a  | 95.0 (88.2) | 90.1 (79.1)   |
            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/pytorch_vision_inception_v3/index.html b/hub/pytorch_vision_inception_v3/index.html new file mode 100644 index 000000000000..33502be36307 --- /dev/null +++ b/hub/pytorch_vision_inception_v3/index.html @@ -0,0 +1,434 @@ + + + + + + + + + + + + + Inception_v3 | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + Inception_v3 +

            + +
            +
            +

            By Pytorch Team

            +
            + +
            +

Also called GoogleNetv3, a famous ConvNet trained on ImageNet, introduced in 2015

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('pytorch/vision:v0.10.0', 'inception_v3', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 299. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(299),
            +    transforms.CenterCrop(299),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +  output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

Inception v3 is based on exploring ways to scale up networks that aim to utilize the added computation as efficiently as possible, through suitably factorized convolutions and aggressive regularization. We benchmark our methods on the ILSVRC 2012 classification challenge validation set and demonstrate substantial gains over the state of the art: 21.2% top-1 and 5.6% top-5 error for single-frame evaluation using a network with a computational cost of 5 billion multiply-adds per inference and fewer than 25 million parameters. With an ensemble of 4 models and multi-crop evaluation, we report 3.5% top-5 error on the validation set (3.6% error on the test set) and 17.3% top-1 error on the validation set.
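As a small, hypothetical illustration of the factorization idea (not code taken from torchvision), replacing a single 5x5 convolution with two stacked 3x3 convolutions keeps the same receptive field while using noticeably fewer parameters:

```python
# Compare parameter counts: one 5x5 conv vs. two stacked 3x3 convs with the same
# receptive field (channel count 192 is an arbitrary illustrative choice).
import torch.nn as nn

def num_params(m):
    return sum(p.numel() for p in m.parameters())

conv5x5 = nn.Conv2d(192, 192, kernel_size=5, padding=2)
factorized = nn.Sequential(
    nn.Conv2d(192, 192, kernel_size=3, padding=1),
    nn.ReLU(inplace=True),
    nn.Conv2d(192, 192, kernel_size=3, padding=1),
)
print(num_params(conv5x5))     # 192*192*25 + 192     = 921,792
print(num_params(factorized))  # 2*(192*192*9 + 192)  = 663,936
```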

            + +

            The 1-crop error rates on the ImageNet dataset with the pretrained model are listed below.

            + + + + + + + + + + + + + + + + +
| Model structure | Top-1 error | Top-5 error |
| --------------- | ----------- | ----------- |
| inception_v3    | 22.55       | 6.44        |
            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/pytorch_vision_meal_v2/index.html b/hub/pytorch_vision_meal_v2/index.html new file mode 100644 index 000000000000..24f7193de6c1 --- /dev/null +++ b/hub/pytorch_vision_meal_v2/index.html @@ -0,0 +1,498 @@ + + + + + + + + + + + + + MEAL_V2 | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + MEAL_V2 +

            + +
            +
            +

            By Carnegie Mellon University

            +
            + +
            +

            Boosting Tiny and Efficient Models using Knowledge Distillation.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            We require one additional Python dependency

            + +
            !pip install timm
            +
            + +
            import torch
            +# list of models: 'mealv1_resnest50', 'mealv2_resnest50', 'mealv2_resnest50_cutmix', 'mealv2_resnest50_380x380', 'mealv2_mobilenetv3_small_075', 'mealv2_mobilenetv3_small_100', 'mealv2_mobilenet_v3_large_100', 'mealv2_efficientnet_b0'
            +# load pretrained models, using "mealv2_resnest50_cutmix" as an example
            +model = torch.hub.load('szq0214/MEAL-V2','meal_v2', 'mealv2_resnest50_cutmix', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

            MEAL V2 models are from the MEAL V2: Boosting Vanilla ResNet-50 to 80%+ Top-1 Accuracy on ImageNet without Tricks paper.

            + +

            In this paper, we introduce a simple yet effective approach that can boost the vanilla ResNet-50 to 80%+ Top-1 accuracy on ImageNet without any tricks. Generally, our method is based on the recently proposed MEAL, i.e., ensemble knowledge distillation via discriminators. We further simplify it through 1) adopting the similarity loss and discriminator only on the final outputs and 2) using the average of softmax probabilities from all teacher ensembles as the stronger supervision for distillation. One crucial perspective of our method is that the one-hot/hard label should not be used in the distillation process. We show that such a simple framework can achieve state-of-the-art results without involving any commonly-used tricks, such as 1) architecture modification; 2) outside training data beyond ImageNet; 3) autoaug/randaug; 4) cosine learning rate; 5) mixup/cutmix training; 6) label smoothing; etc.
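A minimal, hypothetical sketch of the soft-label distillation term described above might look like the following: the target is the average of the teachers' softmax probabilities and no hard labels are used. The function name is illustrative, and the discriminator part of MEAL is deliberately omitted.

```python
# Hypothetical sketch of the soft-label distillation objective: KL divergence between
# the student's log-probabilities and the averaged teacher probabilities (no hard labels).
import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits_list):
    teacher_probs = torch.stack(
        [F.softmax(t, dim=1) for t in teacher_logits_list]
    ).mean(dim=0)                                   # average of teacher softmax outputs
    student_log_probs = F.log_softmax(student_logits, dim=1)
    return F.kl_div(student_log_probs, teacher_probs, reduction='batchmean')

# toy example with random logits for a batch of 4 over 1000 classes
student = torch.randn(4, 1000)
teachers = [torch.randn(4, 1000) for _ in range(3)]
print(distillation_loss(student, teachers))
```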

            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Models                              | Resolution | #Parameters | Top-1/Top-5 |        |
| ----------------------------------- | ---------- | ----------- | ----------- | ------ |
| MEAL-V1 w/ ResNet50                 | 224        | 25.6M       | 78.21/94.01 | GitHub |
| MEAL-V2 w/ ResNet50                 | 224        | 25.6M       | 80.67/95.09 |        |
| MEAL-V2 w/ ResNet50                 | 380        | 25.6M       | 81.72/95.81 |        |
| MEAL-V2 + CutMix w/ ResNet50        | 224        | 25.6M       | 80.98/95.35 |        |
| MEAL-V2 w/ MobileNet V3-Small 0.75  | 224        | 2.04M       | 67.60/87.23 |        |
| MEAL-V2 w/ MobileNet V3-Small 1.0   | 224        | 2.54M       | 69.65/88.71 |        |
| MEAL-V2 w/ MobileNet V3-Large 1.0   | 224        | 5.48M       | 76.92/93.32 |        |
| MEAL-V2 w/ EfficientNet-B0          | 224        | 5.29M       | 78.29/93.95 |        |
            + +

            References

            + +

            Please refer to our papers MEAL V2, MEAL for more details.

            + +
            @article{shen2020mealv2,
            +    title={MEAL V2: Boosting Vanilla ResNet-50 to 80%+ Top-1 Accuracy on ImageNet without Tricks},
            +    author={Shen, Zhiqiang and Savvides, Marios},
            +    journal={arXiv preprint arXiv:2009.08453},
            +    year={2020}
            +}
            +
            +@inproceedings{shen2019MEAL,
            +	title = {MEAL: Multi-Model Ensemble via Adversarial Learning},
            +	author = {Shen, Zhiqiang and He, Zhankui and Xue, Xiangyang},
            +	booktitle = {AAAI},
            +	year = {2019}
            +}
            +
            + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/pytorch_vision_mobilenet_v2/index.html b/hub/pytorch_vision_mobilenet_v2/index.html new file mode 100644 index 000000000000..4f84acf85874 --- /dev/null +++ b/hub/pytorch_vision_mobilenet_v2/index.html @@ -0,0 +1,432 @@ + + + + + + + + + + + + + MobileNet v2 | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + MobileNet v2 +

            + +
            +
            +

            By Pytorch Team

            +
            + +
            +

            Efficient networks optimized for speed and memory, with residual blocks

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('pytorch/vision:v0.10.0', 'mobilenet_v2', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

The MobileNet v2 architecture is based on an inverted residual structure where the input and output of the residual block are thin bottleneck layers, in contrast to traditional residual models, which use expanded representations at the input. MobileNet v2 uses lightweight depthwise convolutions to filter features in the intermediate expansion layer. Additionally, non-linearities in the narrow layers are removed in order to maintain representational power.
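The sketch below is a simplified, hypothetical inverted residual block illustrating the structure described above (1x1 expansion, depthwise 3x3, linear 1x1 projection, with a skip connection when the shapes match); the class name is made up for illustration and this is not the exact torchvision implementation.

```python
# Hypothetical sketch of an inverted residual block.
import torch
import torch.nn as nn

class InvertedResidualSketch(nn.Module):
    def __init__(self, in_ch, out_ch, stride=1, expand_ratio=6):
        super().__init__()
        hidden = in_ch * expand_ratio
        self.use_skip = stride == 1 and in_ch == out_ch
        self.block = nn.Sequential(
            nn.Conv2d(in_ch, hidden, 1, bias=False),              # 1x1 expansion
            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
            nn.Conv2d(hidden, hidden, 3, stride=stride, padding=1,
                      groups=hidden, bias=False),                  # 3x3 depthwise
            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
            nn.Conv2d(hidden, out_ch, 1, bias=False),              # linear 1x1 projection
            nn.BatchNorm2d(out_ch),                                # no non-linearity here
        )

    def forward(self, x):
        out = self.block(x)
        return x + out if self.use_skip else out

blk = InvertedResidualSketch(32, 32)
print(blk(torch.randn(1, 32, 56, 56)).shape)  # torch.Size([1, 32, 56, 56])
```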

            + + + + + + + + + + + + + + + + +
| Model structure | Top-1 error | Top-5 error |
| --------------- | ----------- | ----------- |
| mobilenet_v2    | 28.12       | 9.71        |
            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/pytorch_vision_once_for_all/index.html b/hub/pytorch_vision_once_for_all/index.html new file mode 100644 index 000000000000..c1781d59522d --- /dev/null +++ b/hub/pytorch_vision_once_for_all/index.html @@ -0,0 +1,485 @@ + + + + + + + + + + + + + Once-for-All | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + Once-for-All +

            + +
            +
            +

            By MIT Han Lab

            +
            + +
            +

            Once-for-all (OFA) decouples training and search, and achieves efficient inference across various edge devices and resource constraints.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +

            Get supernet

            + +

You can quickly load a supernet as follows.

            + +
            import torch
            +super_net_name = "ofa_supernet_mbv3_w10" 
            +# other options: 
            +#    ofa_supernet_resnet50 / 
            +#    ofa_supernet_mbv3_w12 / 
            +#    ofa_supernet_proxyless
            +
            +super_net = torch.hub.load('mit-han-lab/once-for-all', super_net_name, pretrained=True).eval()
            +
            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| OFA Network                        | Design Space | Resolution | Width Multiplier | Depth   | Expand Ratio    | Kernel Size |
| ---------------------------------- | ------------ | ---------- | ---------------- | ------- | --------------- | ----------- |
| ofa_resnet50                       | ResNet50D    | 128 - 224  | 0.65, 0.8, 1.0   | 0, 1, 2 | 0.2, 0.25, 0.35 | 3           |
| ofa_mbv3_d234_e346_k357_w1.0       | MobileNetV3  | 128 - 224  | 1.0              | 2, 3, 4 | 3, 4, 6         | 3, 5, 7     |
| ofa_mbv3_d234_e346_k357_w1.2       | MobileNetV3  | 160 - 224  | 1.2              | 2, 3, 4 | 3, 4, 6         | 3, 5, 7     |
| ofa_proxyless_d234_e346_k357_w1.3  | ProxylessNAS | 128 - 224  | 1.3              | 2, 3, 4 | 3, 4, 6         | 3, 5, 7     |
            + +

Below is an example of sampling / selecting a subnet from the supernet.

            + +
            # Randomly sample sub-networks from OFA network
            +super_net.sample_active_subnet()
            +random_subnet = super_net.get_active_subnet(preserve_weight=True)
            +    
            +# Manually set the sub-network
            +super_net.set_active_subnet(ks=7, e=6, d=4)
            +manual_subnet = super_net.get_active_subnet(preserve_weight=True)
            +
            + +

            Get Specialized Architecture

            + +
            import torch
            +
+# or load an architecture specialized for a certain platform
            +net_config = "resnet50D_MAC_4_1B"
            +
            +specialized_net, image_size = torch.hub.load('mit-han-lab/once-for-all', net_config, pretrained=True)
            +specialized_net.eval()
            +
            + +

More models and configurations can be found in once-for-all/model-zoo and obtained through the following script.

            + +
            ofa_specialized_get = torch.hub.load('mit-han-lab/once-for-all', "ofa_specialized_get")
            +model, image_size = ofa_specialized_get("flops@595M_top1@80.0_finetune@75", pretrained=True)
            +model.eval()
            +
            + +

The model’s prediction can be evaluated by

            +
            # Download an example image from pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: 
            +  urllib.URLopener().retrieve(url, filename)
            +except: 
            +  urllib.request.urlretrieve(url, filename)
            +
            +
            +# sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            +
            + +

            Model Description

            +

Once-for-all models are from Once for All: Train One Network and Specialize it for Efficient Deployment. Conventional approaches either manually design or use neural architecture search (NAS) to find a specialized neural network and train it from scratch for each case, which is computationally prohibitive (causing as much CO2 emission as 5 cars over their lifetimes) and thus unscalable. In this work, we propose to train a once-for-all (OFA) network that supports diverse architectural settings by decoupling training and search. Across diverse edge devices, OFA consistently outperforms state-of-the-art (SOTA) NAS methods (up to 4.0% ImageNet top-1 accuracy improvement over MobileNetV3, or same accuracy but 1.5x faster than MobileNetV3 and 2.6x faster than EfficientNet w.r.t. measured latency) while reducing GPU hours and CO2 emission by many orders of magnitude. In particular, OFA achieves a new SOTA 80.0% ImageNet top-1 accuracy under the mobile setting (<600M MACs).
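To see how a single supernet covers many architectural settings, a small hypothetical experiment is to sample a few random subnets with the sampling API shown earlier on this page and compare their sizes; the exact parameter counts will vary with the sampled architectures.

```python
# Hypothetical illustration: sample a few random subnets from the supernet
# and compare their parameter counts.
import torch

super_net = torch.hub.load('mit-han-lab/once-for-all',
                           'ofa_supernet_mbv3_w10', pretrained=True).eval()

for i in range(3):
    super_net.sample_active_subnet()                          # pick a random architecture
    subnet = super_net.get_active_subnet(preserve_weight=True)
    n_params = sum(p.numel() for p in subnet.parameters())
    print(f'random subnet {i}: {n_params / 1e6:.2f}M parameters')
```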

            + + +

            + +

            References

            + +
            @inproceedings{
            +  cai2020once,
            +  title={Once for All: Train One Network and Specialize it for Efficient Deployment},
            +  author={Han Cai and Chuang Gan and Tianzhe Wang and Zhekai Zhang and Song Han},
            +  booktitle={International Conference on Learning Representations},
            +  year={2020},
            +  url={https://arxiv.org/pdf/1908.09791.pdf}
            +}
            +
            + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/pytorch_vision_proxylessnas/index.html b/hub/pytorch_vision_proxylessnas/index.html new file mode 100644 index 000000000000..0da39e47f4d1 --- /dev/null +++ b/hub/pytorch_vision_proxylessnas/index.html @@ -0,0 +1,485 @@ + + + + + + + + + + + + + ProxylessNAS | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + ProxylessNAS +

            + +
            +
            +

            By MIT Han Lab

            +
            + +
            +

            Proxylessly specialize CNN architectures for different hardware platforms.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +target_platform = "proxyless_cpu"
+# proxyless_gpu, proxyless_mobile, proxyless_mobile14 are also available.
            +model = torch.hub.load('mit-han-lab/ProxylessNAS', target_platform, pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

            ProxylessNAS models are from the ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware paper.

            + +

Conventionally, people tend to design one efficient model for all hardware platforms. But different hardware has different properties; for example, CPUs have higher clock frequency while GPUs are better at parallelization. Therefore, instead of generalizing, we need to specialize CNN architectures for different hardware platforms. As shown below, with similar accuracy, specialization offers a free yet significant performance boost on all three platforms.
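As a rough, hypothetical illustration of this point, the different variants can be loaded side by side and timed on whatever hardware you have; absolute numbers will differ from the table below, which was measured on the authors' reference platforms.

```python
# Rough CPU timing sketch for the ProxylessNAS variants (illustrative only).
import time
import torch

x = torch.randn(1, 3, 224, 224)
for target in ['proxyless_cpu', 'proxyless_gpu', 'proxyless_mobile']:
    model = torch.hub.load('mit-han-lab/ProxylessNAS', target, pretrained=True).eval()
    with torch.no_grad():
        for _ in range(5):                  # warm-up
            model(x)
        start = time.time()
        for _ in range(20):
            model(x)
    print(f'{target}: {(time.time() - start) / 20 * 1000:.1f} ms / image on this CPU')
```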

            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Model structure     | GPU Latency | CPU Latency | Mobile Latency |
| ------------------- | ----------- | ----------- | -------------- |
| proxylessnas_gpu    | 5.1ms       | 204.9ms     | 124ms          |
| proxylessnas_cpu    | 7.4ms       | 138.7ms     | 116ms          |
| proxylessnas_mobile | 7.2ms       | 164.1ms     | 78ms           |
            + +

The corresponding top-1 error rates with pretrained models are listed below.

            + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Model structure        | Top-1 error |
| ---------------------- | ----------- |
| proxylessnas_cpu       | 24.7        |
| proxylessnas_gpu       | 24.9        |
| proxylessnas_mobile    | 25.4        |
| proxylessnas_mobile_14 | 23.3        |
            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/pytorch_vision_resnest/index.html b/hub/pytorch_vision_resnest/index.html new file mode 100644 index 000000000000..f1079d7851f5 --- /dev/null +++ b/hub/pytorch_vision_resnest/index.html @@ -0,0 +1,461 @@ + + + + + + + + + + + + + ResNeSt | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + ResNeSt +

            + +
            +
            +

            By Hang Zhang

            +
            + +
            +

            A new ResNet variant.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +# get list of models
            +torch.hub.list('zhanghang1989/ResNeSt', force_reload=True)
            +# load pretrained models, using ResNeSt-50 as an example
            +model = torch.hub.load('zhanghang1989/ResNeSt', 'resnest50', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

            ResNeSt models are from the ResNeSt: Split-Attention Networks paper.

            + +

            While image classification models have recently continued to advance, most downstream applications such as object detection and semantic segmentation still employ ResNet variants as the backbone network due to their simple and modular structure. We present a simple and modular Split-Attention block that enables attention across feature-map groups. By stacking these Split-Attention blocks ResNet-style, we obtain a new ResNet variant which we call ResNeSt. Our network preserves the overall ResNet structure to be used in downstream tasks straightforwardly without introducing additional computational costs. ResNeSt models outperform other networks with similar model complexities, and also help downstream tasks including object detection, instance segmentation and semantic segmentation.
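For intuition, here is a schematic, hypothetical sketch of the split-attention idea: attention weights computed from pooled global context are used to combine several parallel feature-map splits. The class name and layer sizes are illustrative, and cardinality groups and many other details of the actual ResNeSt block are omitted.

```python
# Schematic, simplified sketch of split attention over `radix` parallel feature maps.
import torch
import torch.nn as nn
import torch.nn.functional as F

class SplitAttentionSketch(nn.Module):
    def __init__(self, channels, radix=2, reduction=4):
        super().__init__()
        self.radix = radix
        inter = max(channels // reduction, 8)
        self.fc1 = nn.Conv2d(channels, inter, kernel_size=1)
        self.fc2 = nn.Conv2d(inter, channels * radix, kernel_size=1)

    def forward(self, x):
        # x: (B, radix * C, H, W) -- concatenation of `radix` parallel branches
        b, rc, h, w = x.shape
        c = rc // self.radix
        splits = x.view(b, self.radix, c, h, w)
        gap = splits.sum(dim=1)                                    # fuse the branches
        gap = F.adaptive_avg_pool2d(gap, 1)                        # global context (B, C, 1, 1)
        atten = self.fc2(F.relu(self.fc1(gap)))                    # (B, radix*C, 1, 1)
        atten = F.softmax(atten.view(b, self.radix, c), dim=1)     # attention across splits
        atten = atten.view(b, self.radix, c, 1, 1)
        return (atten * splits).sum(dim=1)                         # (B, C, H, W)

sa = SplitAttentionSketch(channels=64, radix=2)
print(sa(torch.randn(2, 128, 56, 56)).shape)  # torch.Size([2, 64, 56, 56])
```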

            + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Model       | crop size | PyTorch top-1 accuracy (%) |
| ----------- | --------- | -------------------------- |
| ResNeSt-50  | 224       | 81.03                      |
| ResNeSt-101 | 256       | 82.83                      |
| ResNeSt-200 | 320       | 83.84                      |
| ResNeSt-269 | 416       | 84.54                      |
            + +

            References

            + + + + +
            +
            +
            +
            +
            +
            + +
            +
            +
            +
            +

            Docs

            +

            Access comprehensive developer documentation for PyTorch

            + View Docs +
            + +
            +

            Tutorials

            +

            Get in-depth tutorials for beginners and advanced developers

            + View Tutorials +
            + +
            +

            Resources

            +

            Find development resources and get your questions answered

            + View Resources +
            +
            +
            +
            + +
            + +
            + +
            +
            +
            +
            + + +
            +
            +
            + + +
            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/pytorch_vision_resnet/index.html b/hub/pytorch_vision_resnet/index.html new file mode 100644 index 000000000000..9e62d5c8cbcb --- /dev/null +++ b/hub/pytorch_vision_resnet/index.html @@ -0,0 +1,475 @@ + + + + + + + + + + + + + ResNet | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
            +
            + Join us at PyTorch Conference in San Francisco, October 22-23. CFP open now! Learn more. +
            +
            +
            +
            + + +
            + + + + + + + + +
            + +
            +
            + + +
            + +
            +
            + + + < + + +

            + ResNet +

            + +
            +
            +

            By Pytorch Team

            +
            + +
            +

            Deep residual networks pre-trained on ImageNet

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet18', pretrained=True)
            +# or any of these variants
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet34', pretrained=True)
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet50', pretrained=True)
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet101', pretrained=True)
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet152', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

ResNet models were proposed in “Deep Residual Learning for Image Recognition”. Here we have the 5 versions of ResNet models, which contain 18, 34, 50, 101, and 152 layers respectively. Detailed model architectures can be found in Table 1. Their 1-crop error rates on the ImageNet dataset with pretrained models are listed below.

Model structure | Top-1 error | Top-5 error
resnet18        | 30.24       | 10.92
resnet34        | 26.70       | 8.58
resnet50        | 23.85       | 7.13
resnet101       | 22.63       | 6.44
resnet152       | 21.69       | 5.94

            References

diff --git a/hub/pytorch_vision_resnext/index.html b/hub/pytorch_vision_resnext/index.html new file mode 100644 index 000000000000..9121dcf055ca --- /dev/null +++ b/hub/pytorch_vision_resnext/index.html @@ -0,0 +1,450 @@
ResNext | PyTorch

            + ResNext +

            + +
            +
            +

By PyTorch Team

            +
            + +
            +

            Next generation ResNets, more efficient and accurate

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('pytorch/vision:v0.10.0', 'resnext50_32x4d', pretrained=True)
            +# or
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'resnext101_32x8d', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

ResNeXt models were proposed in Aggregated Residual Transformations for Deep Neural Networks. Here we have the 2 versions of ResNeXt models, which contain 50 and 101 layers respectively. A comparison of the model architectures of resnet50 and resnext50 can be found in Table 1. Their 1-crop error rates on the ImageNet dataset with pretrained models are listed below.

Model structure  | Top-1 error | Top-5 error
resnext50_32x4d  | 22.38       | 6.30
resnext101_32x8d | 20.69       | 5.47

            References

diff --git a/hub/pytorch_vision_shufflenet_v2/index.html b/hub/pytorch_vision_shufflenet_v2/index.html new file mode 100644 index 000000000000..6e4c5e0f5d29 --- /dev/null +++ b/hub/pytorch_vision_shufflenet_v2/index.html @@ -0,0 +1,432 @@
ShuffleNet v2 | PyTorch

            + ShuffleNet v2 +

            + +
            +
            +

By PyTorch Team

            +
            + +
            +

            An efficient ConvNet optimized for speed and memory, pre-trained on ImageNet

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('pytorch/vision:v0.10.0', 'shufflenet_v2_x1_0', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

Previously, neural network architecture design was mostly guided by the indirect metric of computational complexity, i.e., FLOPs. However, the direct metric, e.g., speed, also depends on other factors such as memory access cost and platform characteristics. Based on a series of controlled experiments, this work derives several practical guidelines for efficient network design. Accordingly, a new architecture is presented, called ShuffleNet V2. Comprehensive ablation experiments verify that our model is state-of-the-art in terms of the tradeoff between speed and accuracy.

Model structure | Top-1 error | Top-5 error
shufflenet_v2   | 30.64       | 11.68

            References

diff --git a/hub/pytorch_vision_snnmlp/index.html b/hub/pytorch_vision_snnmlp/index.html new file mode 100644 index 000000000000..c4595df9c610 --- /dev/null +++ b/hub/pytorch_vision_snnmlp/index.html @@ -0,0 +1,720 @@
SNNMLP | PyTorch

            + SNNMLP +

            + +
            +
            +

            By Huawei Noah's Ark Lab

            +
            + +
            +

            Brain-inspired Multilayer Perceptron with Spiking Neurons

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('huawei-noah/Efficient-AI-Backbones', 'snnmlp_t', pretrained=True)
            +# or
            +# model = torch.hub.load('huawei-noah/Efficient-AI-Backbones', 'snnmlp_s', pretrained=True)
            +# or
            +# model = torch.hub.load('huawei-noah/Efficient-AI-Backbones', 'snnmlp_b', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +print(torch.nn.functional.softmax(output[0], dim=0))
            +
            +
            + +

            Model Description

            + +

SNNMLP incorporates the mechanism of LIF neurons into MLP models to achieve better accuracy without extra FLOPs. We propose a full-precision LIF operation to communicate between patches, including horizontal LIF and vertical LIF in different directions. We also propose to use group LIF to extract better local features. With LIF modules, our SNNMLP models achieve 81.9%, 83.3% and 83.6% top-1 accuracy on the ImageNet dataset with only 4.4G, 8.5G and 15.2G FLOPs, respectively.

            + +

The corresponding accuracies on the ImageNet dataset with pretrained models are listed below.

Model structure | #Parameters | FLOPs | Top-1 acc
SNNMLP Tiny     | 28M         | 4.4G  | 81.88
SNNMLP Small    | 50M         | 8.5G  | 83.30
SNNMLP Base     | 88M         | 15.2G | 83.59

            References

            + +

            You can read the full paper here.

            +
            @inproceedings{li2022brain,
            +  title={Brain-inspired multilayer perceptron with spiking neurons},
            +  author={Li, Wenshuo and Chen, Hanting and Guo, Jianyuan and Zhang, Ziyang and Wang, Yunhe},
            +  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
            +  pages={783--793},
            +  year={2022}
            +}
            +
diff --git a/hub/pytorch_vision_squeezenet/index.html b/hub/pytorch_vision_squeezenet/index.html new file mode 100644 index 000000000000..25f1daac27bf --- /dev/null +++ b/hub/pytorch_vision_squeezenet/index.html @@ -0,0 +1,450 @@
SqueezeNet | PyTorch

            + SqueezeNet +

            + +
            +
            +

By PyTorch Team

            +
            + +
            +

            Alexnet-level accuracy with 50x fewer parameters.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('pytorch/vision:v0.10.0', 'squeezenet1_0', pretrained=True)
            +# or
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'squeezenet1_1', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

Model squeezenet1_0 is from the paper SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size.

            + +

Model squeezenet1_1 is from the official squeezenet repo. It has 2.4x less computation and slightly fewer parameters than squeezenet1_0, without sacrificing accuracy.

            + +

            Their 1-crop error rates on ImageNet dataset with pretrained models are listed below.

Model structure | Top-1 error | Top-5 error
squeezenet1_0   | 41.90       | 19.58
squeezenet1_1   | 41.81       | 19.38

            References

diff --git a/hub/pytorch_vision_vgg/index.html b/hub/pytorch_vision_vgg/index.html new file mode 100644 index 000000000000..701c7320b85c --- /dev/null +++ b/hub/pytorch_vision_vgg/index.html @@ -0,0 +1,505 @@
vgg-nets | PyTorch

            + vgg-nets +

            + +
            +
            +

By PyTorch Team

            +
            + +
            +

            Award winning ConvNets from 2014 ImageNet ILSVRC challenge

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg11', pretrained=True)
            +# or any of these variants
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg11_bn', pretrained=True)
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg13', pretrained=True)
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg13_bn', pretrained=True)
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg16', pretrained=True)
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg16_bn', pretrained=True)
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg19', pretrained=True)
            +# model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg19_bn', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

Here we have implementations for the models proposed in Very Deep Convolutional Networks for Large-Scale Image Recognition, for each configuration and its variant with batch normalization.

            + +

For example, configuration A presented in the paper is vgg11, configuration B is vgg13, configuration D is vgg16 and configuration E is vgg19. Their batch-norm versions are suffixed with _bn.

            + +

Their 1-crop error rates on the ImageNet dataset with pretrained models are listed below.

Model structure | Top-1 error | Top-5 error
vgg11           | 30.98       | 11.37
vgg11_bn        | 26.70       | 8.58
vgg13           | 30.07       | 10.75
vgg13_bn        | 28.45       | 9.63
vgg16           | 28.41       | 9.62
vgg16_bn        | 26.63       | 8.50
vgg19           | 27.62       | 9.12
vgg19_bn        | 25.76       | 8.15

            References

diff --git a/hub/pytorch_vision_wide_resnet/index.html b/hub/pytorch_vision_wide_resnet/index.html new file mode 100644 index 000000000000..56da111ab441 --- /dev/null +++ b/hub/pytorch_vision_wide_resnet/index.html @@ -0,0 +1,461 @@
Wide ResNet | PyTorch

            + Wide ResNet +

            + +
            +
            +

            By Sergey Zagoruyko

            +
            + +
            +

            Wide Residual Networks

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +# load WRN-50-2:
            +model = torch.hub.load('pytorch/vision:v0.10.0', 'wide_resnet50_2', pretrained=True)
            +# or WRN-101-2
            +model = torch.hub.load('pytorch/vision:v0.10.0', 'wide_resnet101_2', pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

Wide Residual Networks simply have an increased number of channels compared to ResNet; otherwise the architecture is the same. Deeper ImageNet models with a bottleneck block have an increased number of channels in the inner 3x3 convolution.

            + +

The wide_resnet50_2 and wide_resnet101_2 models were trained in FP16 with mixed precision training using SGD with warm restarts. The checkpoints store the weights in half precision (except for batch norm) for a smaller size, and they can be used in FP32 models too.
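Since the released checkpoints keep most weights in half precision, you may also want to run inference in FP16 on a GPU. The snippet below is a minimal sketch under the assumption that a CUDA device is available; it simply casts the loaded model and a hypothetical, randomly generated input batch to half precision, mirroring the sample execution above.

import torch

model = torch.hub.load('pytorch/vision:v0.10.0', 'wide_resnet50_2', pretrained=True)
model.eval()

# Hypothetical input; in practice reuse the preprocessed `input_batch` from the sample execution.
input_batch = torch.randn(1, 3, 224, 224)

if torch.cuda.is_available():
    # Cast both the model and the input to FP16 for faster inference on recent GPUs.
    model = model.half().to('cuda')
    input_batch = input_batch.half().to('cuda')

with torch.no_grad():
    output = model(input_batch)
print(output.shape)  # torch.Size([1, 1000])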

Model structure  | Top-1 error | Top-5 error | # parameters
wide_resnet50_2  | 21.49       | 5.91        | 68.9M
wide_resnet101_2 | 21.16       | 5.72        | 126.9M

            References

diff --git a/hub/sigsep_open-unmix-pytorch_umx/index.html b/hub/sigsep_open-unmix-pytorch_umx/index.html new file mode 100644 index 000000000000..89af7983402a --- /dev/null +++ b/hub/sigsep_open-unmix-pytorch_umx/index.html @@ -0,0 +1,411 @@
Open-Unmix | PyTorch

            + Open-Unmix +

            + +
            +
            +

            By Inria

            +
            + +
            +

            Reference implementation for music source separation

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            # assuming you have a PyTorch >=1.6.0 installed
            +pip install -q torchaudio
            +
            + +
            import torch
            +
            +# loading umxhq four target separator
            +separator = torch.hub.load('sigsep/open-unmix-pytorch', 'umxhq')
            +
            +# generate random audio
            +# ... with shape (nb_samples, nb_channels, nb_timesteps)
            +# ... and with the same sample rate as that of the separator
            +audio = torch.rand((1, 2, 100000))
            +original_sample_rate = separator.sample_rate
            +
            +# make sure to resample the audio to models' sample rate, separator.sample_rate, if the two are different
            +# resampler = torchaudio.transforms.Resample(original_sample_rate, separator.sample_rate)
            +# audio = resampler(audio)
            +
            +estimates = separator(audio)
            +# estimates.shape = (1, 4, 2, 100000)
            +
            + +

            Model Description

            + +

            Open-Unmix provides ready-to-use models that allow users to separate pop music into four stems: vocals, drums, bass and the remaining other instruments. The models were pre-trained on the freely available MUSDB18 dataset.

            + +

            Each target model is based on a three-layer bidirectional deep LSTM. The model learns to predict the magnitude spectrogram of a target source, like vocals, from the magnitude spectrogram of a mixture input. Internally, the prediction is obtained by applying a mask on the input. The model is optimized in the magnitude domain using mean squared error.

            + +

A Separator meta-model (as shown in the code example above) puts together multiple Open-Unmix spectrogram models, one per desired target, and combines their output through a multichannel generalized Wiener filter before applying inverse STFTs using torchaudio. The filtering is a differentiable (but parameter-free) version of norbert.

            + +

            Pre-trained Separator models

            + +
• umxhq (default) is trained on MUSDB18-HQ, which comprises the same tracks as MUSDB18 but uncompressed, yielding a full bandwidth of 22050 Hz.

• umx is trained on the regular MUSDB18, which is bandwidth-limited to 16 kHz due to AAC compression. This model should be used for comparison with other (older) methods evaluated in SiSEC18.

Furthermore, we provide a model for speech enhancement trained by Sony Corporation.

            + + + +

All three models are also available as spectrogram (core) models, which take magnitude spectrogram inputs and output separated spectrograms. These models can be loaded using umxhq_spec, umx_spec and umxse_spec.
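As a minimal sketch, the spectrogram-domain models can be fetched through Torch Hub just like the Separator. The structure of the returned object (a single module versus one model per target) is defined by the repository's hubconf, so the inspection line below is only an assumption to be checked against the repo.

import torch

# Load the spectrogram (core) models instead of the full Separator.
# umxhq_spec, umx_spec and umxse_spec are the entry points mentioned above.
spec_models = torch.hub.load('sigsep/open-unmix-pytorch', 'umxhq_spec')
print(type(spec_models))  # inspect what the hub entry point actually returns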

            + +

            Details

            + +

For additional examples, documentation and usage details, please visit the GitHub repo.

            + +

Furthermore, the models and all utility functions to preprocess, read and save audio stems are available in a Python package that can be installed via

            + +
            pip install openunmix
            +
            + +

            References

diff --git a/hub/simplenet/index.html b/hub/simplenet/index.html new file mode 100644 index 000000000000..ea1f7714dbc8 --- /dev/null +++ b/hub/simplenet/index.html @@ -0,0 +1,518 @@
SimpleNet | PyTorch

            + SimpleNet +

            + +
            +
            +

            By Seyyed Hossein Hasanpour

            +
            + +
            +

            Lets Keep it simple, Using simple architectures to outperform deeper and more complex architectures

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            import torch
            +model = torch.hub.load("coderx7/simplenet_pytorch:v1.0.0", "simplenetv1_5m_m1", pretrained=True)
            +# or any of these variants
            +# model = torch.hub.load("coderx7/simplenet_pytorch:v1.0.0", "simplenetv1_5m_m2", pretrained=True)
            +# model = torch.hub.load("coderx7/simplenet_pytorch:v1.0.0", "simplenetv1_9m_m1", pretrained=True)
            +# model = torch.hub.load("coderx7/simplenet_pytorch:v1.0.0", "simplenetv1_9m_m2", pretrained=True)
            +# model = torch.hub.load("coderx7/simplenet_pytorch:v1.0.0", "simplenetv1_small_m1_05", pretrained=True)
            +# model = torch.hub.load("coderx7/simplenet_pytorch:v1.0.0", "simplenetv1_small_m2_05", pretrained=True)
            +# model = torch.hub.load("coderx7/simplenet_pytorch:v1.0.0", "simplenetv1_small_m1_075", pretrained=True)
            +# model = torch.hub.load("coderx7/simplenet_pytorch:v1.0.0", "simplenetv1_small_m2_075", pretrained=True)
            +model.eval()
            +
            + +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

            + +

            Here’s a sample execution.

            + +
            # Download an example image from the pytorch website
            +import urllib
            +url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            +try: urllib.URLopener().retrieve(url, filename)
            +except: urllib.request.urlretrieve(url, filename)
            +
            + +
            # sample execution (requires torchvision)
            +from PIL import Image
            +from torchvision import transforms
            +input_image = Image.open(filename)
            +preprocess = transforms.Compose([
            +    transforms.Resize(256),
            +    transforms.CenterCrop(224),
            +    transforms.ToTensor(),
            +    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            +])
            +input_tensor = preprocess(input_image)
            +input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
            +
            +# move the input and model to GPU for speed if available
            +if torch.cuda.is_available():
            +    input_batch = input_batch.to('cuda')
            +    model.to('cuda')
            +
            +with torch.no_grad():
            +    output = model(input_batch)
            +# Tensor of shape 1000, with confidence scores over ImageNet's 1000 classes
            +print(output[0])
            +# The output has unnormalized scores. To get probabilities, you can run a softmax on it.
            +probabilities = torch.nn.functional.softmax(output[0], dim=0)
            +print(probabilities)
            +
            + +
            # Download ImageNet labels
            +!wget https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt
            +
            + +
            # Read the categories
            +with open("imagenet_classes.txt", "r") as f:
            +    categories = [s.strip() for s in f.readlines()]
            +# Show top categories per image
            +top5_prob, top5_catid = torch.topk(probabilities, 5)
            +for i in range(top5_prob.size(0)):
            +    print(categories[top5_catid[i]], top5_prob[i].item())
            +
            + +

            Model Description

            + +

SimpleNet models were proposed in “Lets Keep it simple, Using simple architectures to outperform deeper and more complex architectures”. Here we have the 8 versions of SimpleNet models, which contain 1.5m, 3.2m, 5.7m and 9.5m parameters respectively. Detailed model architectures can be found in Table 1 and Table 2. Their 1-crop error rates on the ImageNet dataset with pretrained models are listed below.

            + +

            The m2 variants

Model structure          | Top-1 error | Top-5 error
simplenetv1_small_m2_05  | 38.33       | 16.512
simplenetv1_small_m2_075 | 31.494      | 11.85
simplenetv1_5m_m2        | 27.97       | 9.676
simplenetv1_9m_m2        | 25.77       | 8.252

            The m1 variants

Model structure          | Top-1 error | Top-5 error
simplenetv1_small_m1_05  | 38.878      | 17.012
simplenetv1_small_m1_075 | 32.216      | 12.282
simplenetv1_5m_m1        | 28.452      | 10.06
simplenetv1_9m_m1        | 26.208      | 8.514

            References

diff --git a/hub/snakers4_silero-models_stt/index.html b/hub/snakers4_silero-models_stt/index.html new file mode 100644 index 000000000000..3eda3cf67b37 --- /dev/null +++ b/hub/snakers4_silero-models_stt/index.html @@ -0,0 +1,401 @@
Silero Speech-To-Text Models | PyTorch

            + Silero Speech-To-Text Models +

            + +
            +
            +

            By Silero AI Team

            +
            + +
            +

            A set of compact enterprise-grade pre-trained STT Models for multiple languages.

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            # this assumes that you have a proper version of PyTorch already installed
            +pip install -q torchaudio omegaconf soundfile
            +
            + +
            import torch
            +import zipfile
            +import torchaudio
            +from glob import glob
            +
            +device = torch.device('cpu')  # gpu also works, but our models are fast enough for CPU
            +
            +model, decoder, utils = torch.hub.load(repo_or_dir='snakers4/silero-models',
            +                                       model='silero_stt',
            +                                       language='en', # also available 'de', 'es'
            +                                       device=device)
            +(read_batch, split_into_batches,
            + read_audio, prepare_model_input) = utils  # see function signature for details
            +
            +# download a single file, any format compatible with TorchAudio (soundfile backend)
            +torch.hub.download_url_to_file('https://opus-codec.org/static/examples/samples/speech_orig.wav',
            +                               dst ='speech_orig.wav', progress=True)
            +test_files = glob('speech_orig.wav')
            +batches = split_into_batches(test_files, batch_size=10)
            +input = prepare_model_input(read_batch(batches[0]),
            +                            device=device)
            +
            +output = model(input)
            +for example in output:
            +    print(decoder(example.cpu()))
            +
            + +

            Model Description

            + +

Silero Speech-To-Text models provide enterprise-grade STT in a compact form factor for several commonly spoken languages. Unlike conventional ASR models, our models are robust to a variety of dialects, codecs, domains, noises and lower sampling rates (for simplicity, audio should be resampled to 16 kHz). The models consume normalized audio in the form of samples (i.e. without any pre-processing except for normalization to [-1, 1]) and output frames with token probabilities. We provide a decoder utility for simplicity (we could include it in the model itself, but scripted modules had problems with storing model artifacts, i.e. labels, during certain export scenarios).
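Because the models expect 16 kHz input, recordings at other rates need a small resampling step first. Here is a minimal sketch using torchaudio; the file name 'audio.wav' is a hypothetical local file, and the mono mix-down is an assumption matching the single-channel input the example above prepares.

import torchaudio

# Load a (hypothetical) local recording and bring it to the 16 kHz expected by the model.
waveform, sample_rate = torchaudio.load('audio.wav')
if sample_rate != 16000:
    waveform = torchaudio.functional.resample(waveform, orig_freq=sample_rate, new_freq=16000)

# Mix down to mono if the file has more than one channel (assumption: the model consumes mono audio).
if waveform.size(0) > 1:
    waveform = waveform.mean(dim=0, keepdim=True)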

            + +

            We hope that our efforts with Open-STT and Silero Models will bring the ImageNet moment in speech closer.

            + +

            Supported Languages and Formats

            + +

            As of this page update, the following languages are supported:

            + +
              +
            • English
            • +
            • German
            • +
            • Spanish
            • +
            + +

            To see the always up-to-date language list, please visit our repo and see the yml file for all available checkpoints.

            + +

            Additional Examples and Benchmarks

            + +

            For additional examples and other model formats please visit this link. For quality and performance benchmarks please see the wiki. These resources will be updated from time to time.

            + +

            References

diff --git a/hub/snakers4_silero-models_tts/index.html b/hub/snakers4_silero-models_tts/index.html new file mode 100644 index 000000000000..e4035c0f50c6 --- /dev/null +++ b/hub/snakers4_silero-models_tts/index.html @@ -0,0 +1,399 @@
Silero Text-To-Speech Models | PyTorch

            + Silero Text-To-Speech Models +

            + +
            +
            +

            By Silero AI Team

            +
            + +
            +

            A set of compact enterprise-grade pre-trained TTS Models for multiple languages

            + +
            +
            +
            +
            + +
            +
            +
            +
            +
            + + +
            +
            +
            +
            # this assumes that you have a proper version of PyTorch already installed
            +pip install -q torchaudio omegaconf
            +
            + +
            import torch
            +
            +language = 'en'
            +speaker = 'lj_16khz'
            +device = torch.device('cpu')
            +model, symbols, sample_rate, example_text, apply_tts = torch.hub.load(repo_or_dir='snakers4/silero-models',
            +                                                                      model='silero_tts',
            +                                                                      language=language,
            +                                                                      speaker=speaker)
            +model = model.to(device)  # gpu or cpu
            +audio = apply_tts(texts=[example_text],
            +                  model=model,
            +                  sample_rate=sample_rate,
            +                  symbols=symbols,
            +                  device=device)
            +
            + +
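To listen to the result outside of an interactive session, one option is to write the generated waveform to disk with torchaudio. This is a small sketch assuming apply_tts returns a list with one 1-D waveform per input text (as the example above suggests); the output file name is arbitrary.

import torchaudio

# `audio` and `sample_rate` come from the snippet above.
waveform = audio[0].unsqueeze(0)  # add a channel dimension: (1, num_samples)
torchaudio.save('silero_tts_output.wav', waveform.cpu(), sample_rate)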

            Model Description

            + +

            Silero Text-To-Speech models provide enterprise grade TTS in a compact form-factor for several commonly spoken languages:

            + +
• One-line usage
• Naturally sounding speech
• No GPU or training required
• Minimalism and lack of dependencies
• A library of voices in many languages
• Support for 16kHz and 8kHz out of the box
• High throughput on slow hardware. Decent performance on one CPU thread

            Supported Languages and Formats

            + +

As of this page update, speakers for the following languages are supported in both 8 kHz and 16 kHz:

            + +
• Russian (6 speakers)
• English (1 speaker)
• German (1 speaker)
• Spanish (1 speaker)
• French (1 speaker)

For an always up-to-date language list, please visit our repo and see the yml file, which lists all available checkpoints.

            + +
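For a programmatic view of the same list, the checkpoint inventory can be loaded with omegaconf (already installed above). This is only an illustrative sketch: the file name and URL below are assumptions based on the repo layout, so check the repo for the authoritative location:

import torch
from omegaconf import OmegaConf

# assumed location of the checkpoint list in the snakers4/silero-models repo
torch.hub.download_url_to_file(
    'https://raw.githubusercontent.com/snakers4/silero-models/master/models.yml',
    'latest_silero_models.yml')
models = OmegaConf.load('latest_silero_models.yml')
print(list(models.tts_models.keys()))  # languages with published TTS checkpoints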

            Additional Examples and Benchmarks

            + +

For additional examples and other model formats, please visit this link. For quality and performance benchmarks, please see the wiki. These resources will be updated from time to time.

            + +

            References

            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/snakers4_silero-vad_vad/index.html b/hub/snakers4_silero-vad_vad/index.html new file mode 100644 index 000000000000..4027156d50ea --- /dev/null +++ b/hub/snakers4_silero-vad_vad/index.html @@ -0,0 +1,389 @@ + + + + + + + + + + + + + Silero Voice Activity Detector | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

            + Silero Voice Activity Detector +

            + +
            +
            +

            By Silero AI Team

            +
            + +
            +

            Pre-trained Voice Activity Detector

            # this assumes that you have a proper version of PyTorch already installed
            +pip install -q torchaudio
            +
            + +
            import torch
            +torch.set_num_threads(1)
            +
            +from IPython.display import Audio
            +from pprint import pprint
            +# download example
            +torch.hub.download_url_to_file('https://models.silero.ai/vad_models/en.wav', 'en_example.wav')
            +
            +model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',
            +                              model='silero_vad',
            +                              force_reload=True)
            +
            +(get_speech_timestamps,
            + _, read_audio,
            + *_) = utils
            +
            +sampling_rate = 16000 # also accepts 8000
            +wav = read_audio('en_example.wav', sampling_rate=sampling_rate)
            +# get speech timestamps from full audio file
            +speech_timestamps = get_speech_timestamps(wav, model, sampling_rate=sampling_rate)
            +pprint(speech_timestamps)
            +
            + +
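The timestamps printed above are expressed in samples. As a small illustrative sketch (assuming each entry is a dict with 'start' and 'end' keys in samples, as the output of the snippet above suggests), they can be converted to seconds for easier inspection:

# convert sample offsets to seconds using the sampling_rate defined above
speech_in_seconds = [
    {'start': ts['start'] / sampling_rate, 'end': ts['end'] / sampling_rate}
    for ts in speech_timestamps
]
pprint(speech_in_seconds)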

            Model Description

            + +

Silero VAD is a pre-trained, enterprise-grade Voice Activity Detector (VAD): enterprise-grade speech products made refreshingly simple (see our STT models). Each model is published separately.

            + +

Currently there are hardly any high-quality, modern, free, public voice activity detectors other than the WebRTC Voice Activity Detector (link). WebRTC, however, is starting to show its age and suffers from many false positives.

            + +

(!!!) Important Notice (!!!) - the models are intended to run on CPU only and are optimized for performance on a single CPU thread. Note that the model is quantized.

            + +

            Additional Examples and Benchmarks

            + +

For additional examples and other model formats, please visit this link, and refer to the extensive examples in Colab format (including the streaming examples).

            + +

            References

            + +

            VAD model architectures are based on similar STT architectures.

            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hub/ultralytics_yolov5/index.html b/hub/ultralytics_yolov5/index.html new file mode 100644 index 000000000000..7ac0142230a9 --- /dev/null +++ b/hub/ultralytics_yolov5/index.html @@ -0,0 +1,553 @@ + + + + + + + + + + + + + YOLOv5 | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

            + YOLOv5 +

            + +
            +
            +

            By Ultralytics

            +
            + +
            +

            Ultralytics YOLOv5 🚀 for object detection, instance segmentation and image classification.


            Before You Start

            + +

            Start from a Python>=3.8 environment with PyTorch>=1.7 installed. To install PyTorch see https://pytorch.org/get-started/locally/. To install YOLOv5 dependencies:

            + +
            pip install -U ultralytics
            +
            + +
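A quick way to confirm that the environment meets these requirements is to print the installed versions. The check below is only an illustrative sketch and assumes the packages above are installed:

import sys
import torch

# verify the Python and PyTorch requirements stated above
print(f"Python {sys.version.split()[0]}")              # expect >= 3.8
print(f"PyTorch {torch.__version__}")                  # expect >= 1.7
print(f"CUDA available: {torch.cuda.is_available()}")  # optional GPU check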

            Model Description

            + +

            YOLO Model Comparison

            + +

            Ultralytics YOLOv5 🚀 is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLOv5 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, instance segmentation and image classification tasks.

            + +

            We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!

| Model | size (pixels) | mAPval 50-95 | mAPval 50 | Speed CPU b1 (ms) | Speed V100 b1 (ms) | Speed V100 b32 (ms) | params (M) | FLOPs @640 (B) |
|-------|---------------|--------------|-----------|-------------------|--------------------|---------------------|------------|----------------|
| YOLOv5n | 640 | 28.0 | 45.7 | 45 | 6.3 | 0.6 | 1.9 | 4.5 |
| YOLOv5s | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 |
| YOLOv5m | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 |
| YOLOv5l | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 |
| YOLOv5x | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 |
| YOLOv5n6 | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 |
| YOLOv5s6 | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 |
| YOLOv5m6 | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 |
| YOLOv5l6 | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 |
| YOLOv5x6 + [TTA] | 1280 / 1536 | 55.0 / 55.8 | 72.7 / 72.7 | 3136 / - | 26.2 / - | 19.4 / - | 140.7 / - | 209.8 / - |
            + +
Table Notes

- All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
- **mAPval** values are for single-model single-scale on the [COCO val2017](http://cocodataset.org) dataset. Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
- **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included. Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
- **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) includes reflection and scale augmentations. Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
            + +

            Load From PyTorch Hub

            + +

This example loads a pretrained YOLOv5s model and passes an image for inference. YOLOv5 accepts URL, filename, PIL, OpenCV, NumPy and PyTorch inputs, and returns detections in torch, pandas, and JSON output formats. See the YOLOv5 PyTorch Hub Tutorial for details.

            + +
            import torch
            +
            +# Model
            +model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
            +
            +# Images
            +imgs = ['https://ultralytics.com/images/zidane.jpg']  # batch of images
            +
            +# Inference
            +results = model(imgs)
            +
            +# Results
            +results.print()
            +results.save()  # or .show()
            +
            +results.xyxy[0]  # img1 predictions (tensor)
            +results.pandas().xyxy[0]  # img1 predictions (pandas)
            +#      xmin    ymin    xmax   ymax  confidence  class    name
            +# 0  749.50   43.50  1148.0  704.5    0.874023      0  person
            +# 1  433.50  433.50   517.5  714.5    0.687988     27     tie
            +# 2  114.75  195.75  1095.0  708.0    0.624512      0  person
            +# 3  986.00  304.00  1028.0  420.0    0.286865     27     tie
            +
            + +
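Because `results.pandas().xyxy[0]` is an ordinary pandas DataFrame (with the columns shown in the comments above), detections can be post-processed with standard pandas operations. A minimal sketch, reusing `results` from the example above; the 0.5 confidence threshold is an arbitrary illustration:

# keep only confident 'person' detections from the example above
df = results.pandas().xyxy[0]
people = df[(df['name'] == 'person') & (df['confidence'] > 0.5)]
print(people[['xmin', 'ymin', 'xmax', 'ymax', 'confidence']])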

            Citation

            + +

            If you use YOLOv5 or YOLOv5u in your research, please cite the Ultralytics YOLOv5 repository as follows:

            + +

            DOI

            + +
            @software{yolov5,
            +  title = {YOLOv5 by Ultralytics},
            +  author = {Glenn Jocher},
            +  year = {2020},
            +  version = {7.0},
            +  license = {AGPL-3.0},
            +  url = {https://github.com/ultralytics/yolov5},
            +  doi = {10.5281/zenodo.3908559},
            +  orcid = {0000-0001-5950-6979}
            +}
            +
            + +

            Contact

            + +

            For YOLOv5 bug reports and feature requests please visit GitHub Issues, and join our Discord community for questions and discussions!

            + +

             

            + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/imagenet_classes.txt b/imagenet_classes.txt new file mode 100644 index 000000000000..888d6f51dd77 --- /dev/null +++ b/imagenet_classes.txt @@ -0,0 +1,1000 @@ +tench +goldfish +great white shark +tiger shark +hammerhead +electric ray +stingray +cock +hen +ostrich +brambling +goldfinch +house finch +junco +indigo bunting +robin +bulbul +jay +magpie +chickadee +water ouzel +kite +bald eagle +vulture +great grey owl +European fire salamander +common newt +eft +spotted salamander +axolotl +bullfrog +tree frog +tailed frog +loggerhead +leatherback turtle +mud turtle +terrapin +box turtle +banded gecko +common iguana +American chameleon +whiptail +agama +frilled lizard +alligator lizard +Gila monster +green lizard +African chameleon +Komodo dragon +African crocodile +American alligator +triceratops +thunder snake +ringneck snake +hognose snake +green snake +king snake +garter snake +water snake +vine snake +night snake +boa constrictor +rock python +Indian cobra +green mamba +sea snake +horned viper +diamondback +sidewinder +trilobite +harvestman +scorpion +black and gold garden spider +barn spider +garden spider +black widow +tarantula +wolf spider +tick +centipede +black grouse +ptarmigan +ruffed grouse +prairie chicken +peacock +quail +partridge +African grey +macaw +sulphur-crested cockatoo +lorikeet +coucal +bee eater +hornbill +hummingbird +jacamar +toucan +drake +red-breasted merganser +goose +black swan +tusker +echidna +platypus +wallaby +koala +wombat +jellyfish +sea anemone +brain coral +flatworm +nematode +conch +snail +slug +sea slug +chiton +chambered nautilus +Dungeness crab +rock crab +fiddler crab +king crab +American lobster +spiny lobster +crayfish +hermit crab +isopod +white stork +black stork +spoonbill +flamingo +little blue heron +American egret +bittern +crane +limpkin +European gallinule +American coot +bustard +ruddy turnstone +red-backed sandpiper +redshank +dowitcher +oystercatcher +pelican +king penguin +albatross +grey whale +killer whale +dugong +sea lion +Chihuahua +Japanese spaniel +Maltese dog +Pekinese +Shih-Tzu +Blenheim spaniel +papillon +toy terrier +Rhodesian ridgeback +Afghan hound +basset +beagle +bloodhound +bluetick +black-and-tan coonhound +Walker hound +English foxhound +redbone +borzoi +Irish wolfhound +Italian greyhound +whippet +Ibizan hound +Norwegian elkhound +otterhound +Saluki +Scottish deerhound +Weimaraner +Staffordshire bullterrier +American Staffordshire terrier +Bedlington terrier +Border terrier +Kerry blue terrier +Irish terrier +Norfolk terrier +Norwich terrier +Yorkshire terrier +wire-haired fox terrier +Lakeland terrier +Sealyham terrier +Airedale +cairn +Australian terrier +Dandie Dinmont +Boston bull +miniature schnauzer +giant schnauzer +standard schnauzer +Scotch terrier +Tibetan terrier +silky terrier +soft-coated wheaten terrier +West Highland white terrier +Lhasa +flat-coated retriever +curly-coated retriever +golden retriever +Labrador retriever +Chesapeake Bay retriever +German short-haired pointer +vizsla +English setter +Irish setter +Gordon setter +Brittany spaniel +clumber +English springer +Welsh springer spaniel +cocker spaniel +Sussex spaniel +Irish water spaniel +kuvasz +schipperke +groenendael +malinois +briard +kelpie +komondor +Old English sheepdog +Shetland sheepdog +collie +Border collie +Bouvier des Flandres +Rottweiler +German shepherd +Doberman +miniature pinscher +Greater Swiss Mountain dog +Bernese mountain dog +Appenzeller 
+EntleBucher +boxer +bull mastiff +Tibetan mastiff +French bulldog +Great Dane +Saint Bernard +Eskimo dog +malamute +Siberian husky +dalmatian +affenpinscher +basenji +pug +Leonberg +Newfoundland +Great Pyrenees +Samoyed +Pomeranian +chow +keeshond +Brabancon griffon +Pembroke +Cardigan +toy poodle +miniature poodle +standard poodle +Mexican hairless +timber wolf +white wolf +red wolf +coyote +dingo +dhole +African hunting dog +hyena +red fox +kit fox +Arctic fox +grey fox +tabby +tiger cat +Persian cat +Siamese cat +Egyptian cat +cougar +lynx +leopard +snow leopard +jaguar +lion +tiger +cheetah +brown bear +American black bear +ice bear +sloth bear +mongoose +meerkat +tiger beetle +ladybug +ground beetle +long-horned beetle +leaf beetle +dung beetle +rhinoceros beetle +weevil +fly +bee +ant +grasshopper +cricket +walking stick +cockroach +mantis +cicada +leafhopper +lacewing +dragonfly +damselfly +admiral +ringlet +monarch +cabbage butterfly +sulphur butterfly +lycaenid +starfish +sea urchin +sea cucumber +wood rabbit +hare +Angora +hamster +porcupine +fox squirrel +marmot +beaver +guinea pig +sorrel +zebra +hog +wild boar +warthog +hippopotamus +ox +water buffalo +bison +ram +bighorn +ibex +hartebeest +impala +gazelle +Arabian camel +llama +weasel +mink +polecat +black-footed ferret +otter +skunk +badger +armadillo +three-toed sloth +orangutan +gorilla +chimpanzee +gibbon +siamang +guenon +patas +baboon +macaque +langur +colobus +proboscis monkey +marmoset +capuchin +howler monkey +titi +spider monkey +squirrel monkey +Madagascar cat +indri +Indian elephant +African elephant +lesser panda +giant panda +barracouta +eel +coho +rock beauty +anemone fish +sturgeon +gar +lionfish +puffer +abacus +abaya +academic gown +accordion +acoustic guitar +aircraft carrier +airliner +airship +altar +ambulance +amphibian +analog clock +apiary +apron +ashcan +assault rifle +backpack +bakery +balance beam +balloon +ballpoint +Band Aid +banjo +bannister +barbell +barber chair +barbershop +barn +barometer +barrel +barrow +baseball +basketball +bassinet +bassoon +bathing cap +bath towel +bathtub +beach wagon +beacon +beaker +bearskin +beer bottle +beer glass +bell cote +bib +bicycle-built-for-two +bikini +binder +binoculars +birdhouse +boathouse +bobsled +bolo tie +bonnet +bookcase +bookshop +bottlecap +bow +bow tie +brass +brassiere +breakwater +breastplate +broom +bucket +buckle +bulletproof vest +bullet train +butcher shop +cab +caldron +candle +cannon +canoe +can opener +cardigan +car mirror +carousel +carpenter's kit +carton +car wheel +cash machine +cassette +cassette player +castle +catamaran +CD player +cello +cellular telephone +chain +chainlink fence +chain mail +chain saw +chest +chiffonier +chime +china cabinet +Christmas stocking +church +cinema +cleaver +cliff dwelling +cloak +clog +cocktail shaker +coffee mug +coffeepot +coil +combination lock +computer keyboard +confectionery +container ship +convertible +corkscrew +cornet +cowboy boot +cowboy hat +cradle +crane +crash helmet +crate +crib +Crock Pot +croquet ball +crutch +cuirass +dam +desk +desktop computer +dial telephone +diaper +digital clock +digital watch +dining table +dishrag +dishwasher +disk brake +dock +dogsled +dome +doormat +drilling platform +drum +drumstick +dumbbell +Dutch oven +electric fan +electric guitar +electric locomotive +entertainment center +envelope +espresso maker +face powder +feather boa +file +fireboat +fire engine +fire screen +flagpole +flute +folding chair +football helmet +forklift +fountain +fountain pen 
+four-poster +freight car +French horn +frying pan +fur coat +garbage truck +gasmask +gas pump +goblet +go-kart +golf ball +golfcart +gondola +gong +gown +grand piano +greenhouse +grille +grocery store +guillotine +hair slide +hair spray +half track +hammer +hamper +hand blower +hand-held computer +handkerchief +hard disc +harmonica +harp +harvester +hatchet +holster +home theater +honeycomb +hook +hoopskirt +horizontal bar +horse cart +hourglass +iPod +iron +jack-o'-lantern +jean +jeep +jersey +jigsaw puzzle +jinrikisha +joystick +kimono +knee pad +knot +lab coat +ladle +lampshade +laptop +lawn mower +lens cap +letter opener +library +lifeboat +lighter +limousine +liner +lipstick +Loafer +lotion +loudspeaker +loupe +lumbermill +magnetic compass +mailbag +mailbox +maillot +maillot +manhole cover +maraca +marimba +mask +matchstick +maypole +maze +measuring cup +medicine chest +megalith +microphone +microwave +military uniform +milk can +minibus +miniskirt +minivan +missile +mitten +mixing bowl +mobile home +Model T +modem +monastery +monitor +moped +mortar +mortarboard +mosque +mosquito net +motor scooter +mountain bike +mountain tent +mouse +mousetrap +moving van +muzzle +nail +neck brace +necklace +nipple +notebook +obelisk +oboe +ocarina +odometer +oil filter +organ +oscilloscope +overskirt +oxcart +oxygen mask +packet +paddle +paddlewheel +padlock +paintbrush +pajama +palace +panpipe +paper towel +parachute +parallel bars +park bench +parking meter +passenger car +patio +pay-phone +pedestal +pencil box +pencil sharpener +perfume +Petri dish +photocopier +pick +pickelhaube +picket fence +pickup +pier +piggy bank +pill bottle +pillow +ping-pong ball +pinwheel +pirate +pitcher +plane +planetarium +plastic bag +plate rack +plow +plunger +Polaroid camera +pole +police van +poncho +pool table +pop bottle +pot +potter's wheel +power drill +prayer rug +printer +prison +projectile +projector +puck +punching bag +purse +quill +quilt +racer +racket +radiator +radio +radio telescope +rain barrel +recreational vehicle +reel +reflex camera +refrigerator +remote control +restaurant +revolver +rifle +rocking chair +rotisserie +rubber eraser +rugby ball +rule +running shoe +safe +safety pin +saltshaker +sandal +sarong +sax +scabbard +scale +school bus +schooner +scoreboard +screen +screw +screwdriver +seat belt +sewing machine +shield +shoe shop +shoji +shopping basket +shopping cart +shovel +shower cap +shower curtain +ski +ski mask +sleeping bag +slide rule +sliding door +slot +snorkel +snowmobile +snowplow +soap dispenser +soccer ball +sock +solar dish +sombrero +soup bowl +space bar +space heater +space shuttle +spatula +speedboat +spider web +spindle +sports car +spotlight +stage +steam locomotive +steel arch bridge +steel drum +stethoscope +stole +stone wall +stopwatch +stove +strainer +streetcar +stretcher +studio couch +stupa +submarine +suit +sundial +sunglass +sunglasses +sunscreen +suspension bridge +swab +sweatshirt +swimming trunks +swing +switch +syringe +table lamp +tank +tape player +teapot +teddy +television +tennis ball +thatch +theater curtain +thimble +thresher +throne +tile roof +toaster +tobacco shop +toilet seat +torch +totem pole +tow truck +toyshop +tractor +trailer truck +tray +trench coat +tricycle +trimaran +tripod +triumphal arch +trolleybus +trombone +tub +turnstile +typewriter keyboard +umbrella +unicycle +upright +vacuum +vase +vault +velvet +vending machine +vestment +viaduct +violin +volleyball +waffle iron +wall clock +wallet +wardrobe +warplane +washbasin +washer 
+water bottle +water jug +water tower +whiskey jug +whistle +wig +window screen +window shade +Windsor tie +wine bottle +wing +wok +wooden spoon +wool +worm fence +wreck +yawl +yurt +web site +comic book +crossword puzzle +street sign +traffic light +book jacket +menu +plate +guacamole +consomme +hot pot +trifle +ice cream +ice lolly +French loaf +bagel +pretzel +cheeseburger +hotdog +mashed potato +head cabbage +broccoli +cauliflower +zucchini +spaghetti squash +acorn squash +butternut squash +cucumber +artichoke +bell pepper +cardoon +mushroom +Granny Smith +strawberry +orange +lemon +fig +pineapple +banana +jackfruit +custard apple +pomegranate +hay +carbonara +chocolate sauce +dough +meat loaf +pizza +potpie +burrito +red wine +espresso +cup +eggnog +alp +bubble +cliff +coral reef +geyser +lakeside +promontory +sandbar +seashore +valley +volcano +ballplayer +groom +scuba diver +rapeseed +daisy +yellow lady's slipper +corn +acorn +hip +buckeye +coral fungus +agaric +gyromitra +stinkhorn +earthstar +hen-of-the-woods +bolete +ear +toilet tissue \ No newline at end of file diff --git a/javadoc/1.4.0/script-dir/jquery-ui.css b/javadoc/1.4.0/script-dir/jquery-ui.css index c4487b41cdbe..f23f3cc96537 100644 --- a/javadoc/1.4.0/script-dir/jquery-ui.css +++ b/javadoc/1.4.0/script-dir/jquery-ui.css @@ -577,6 +577,5 @@ a.ui-button:active, filter: Alpha(Opacity=30); /* support: IE8 */ } .ui-widget-shadow { - -webkit-box-shadow: -8px -8px 8px #aaaaaa; box-shadow: -8px -8px 8px #aaaaaa; } diff --git a/javadoc/1.4.0/script-dir/jquery-ui.min.css b/javadoc/1.4.0/script-dir/jquery-ui.min.css index 6be72f58c02d..4d4533fd2126 100644 --- a/javadoc/1.4.0/script-dir/jquery-ui.min.css +++ b/javadoc/1.4.0/script-dir/jquery-ui.min.css @@ -4,4 +4,4 @@ * To view and modify this theme, visit http://jqueryui.com/themeroller/?scope=&folderName=custom-theme&bgImgOpacityError=95&bgImgOpacityHighlight=55&bgImgOpacityActive=65&bgImgOpacityHover=75&bgImgOpacityDefault=75&bgImgOpacityContent=75&bgImgOpacityHeader=75&cornerRadiusShadow=8px&offsetLeftShadow=-8px&offsetTopShadow=-8px&thicknessShadow=8px&opacityShadow=30&bgImgOpacityShadow=0&bgTextureShadow=flat&bgColorShadow=%23aaaaaa&opacityOverlay=30&bgImgOpacityOverlay=0&bgTextureOverlay=flat&bgColorOverlay=%23aaaaaa&iconColorError=%23cd0a0a&fcError=%23cd0a0a&borderColorError=%23cd0a0a&bgTextureError=glass&bgColorError=%23fef1ec&iconColorHighlight=%232e83ff&fcHighlight=%23363636&borderColorHighlight=%23fcefa1&bgTextureHighlight=glass&bgColorHighlight=%23fbf9ee&iconColorActive=%23454545&fcActive=%23212121&borderColorActive=%23aaaaaa&bgTextureActive=glass&bgColorActive=%23dadada&iconColorHover=%23454545&fcHover=%23212121&borderColorHover=%23999999&bgTextureHover=glass&bgColorHover=%23dadada&iconColorDefault=%23888888&fcDefault=%23555555&borderColorDefault=%23d3d3d3&bgTextureDefault=glass&bgColorDefault=%23e6e6e6&iconColorContent=%23222222&fcContent=%23222222&borderColorContent=%23aaaaaa&bgTextureContent=flat&bgColorContent=%23ffffff&iconColorHeader=%23222222&fcHeader=%23222222&borderColorHeader=%23aaaaaa&bgTextureHeader=highlight_soft&bgColorHeader=%23cccccc&cornerRadius=4px&fwDefault=normal&fsDefault=1.1em&ffDefault=Verdana%2CArial%2Csans-serif * Copyright jQuery Foundation and other contributors; Licensed MIT */ -.ui-helper-hidden{display:none}.ui-helper-hidden-accessible{border:0;clip:rect(0 0 0 
0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.ui-helper-reset{margin:0;padding:0;border:0;outline:0;line-height:1.3;text-decoration:none;font-size:100%;list-style:none}.ui-helper-clearfix:before,.ui-helper-clearfix:after{content:"";display:table;border-collapse:collapse}.ui-helper-clearfix:after{clear:both}.ui-helper-zfix{width:100%;height:100%;top:0;left:0;position:absolute;opacity:0;filter:Alpha(Opacity=0)}.ui-front{z-index:100}.ui-state-disabled{cursor:default!important;pointer-events:none}.ui-icon{display:inline-block;vertical-align:middle;margin-top:-.25em;position:relative;text-indent:-99999px;overflow:hidden;background-repeat:no-repeat}.ui-widget-icon-block{left:50%;margin-left:-8px;display:block}.ui-widget-overlay{position:fixed;top:0;left:0;width:100%;height:100%}.ui-autocomplete{position:absolute;top:0;left:0;cursor:default}.ui-menu{list-style:none;padding:0;margin:0;display:block;outline:0}.ui-menu .ui-menu{position:absolute}.ui-menu .ui-menu-item{margin:0;cursor:pointer;list-style-image:url("data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7")}.ui-menu .ui-menu-item-wrapper{position:relative;padding:3px 1em 3px .4em}.ui-menu .ui-menu-divider{margin:5px 0;height:0;font-size:0;line-height:0;border-width:1px 0 0 0}.ui-menu .ui-state-focus,.ui-menu .ui-state-active{margin:-1px}.ui-menu-icons{position:relative}.ui-menu-icons .ui-menu-item-wrapper{padding-left:2em}.ui-menu .ui-icon{position:absolute;top:0;bottom:0;left:.2em;margin:auto 0}.ui-menu .ui-menu-icon{left:auto;right:0}.ui-widget{font-family:Verdana,Arial,sans-serif;font-size:1.1em}.ui-widget .ui-widget{font-size:1em}.ui-widget input,.ui-widget select,.ui-widget textarea,.ui-widget button{font-family:Verdana,Arial,sans-serif;font-size:1em}.ui-widget.ui-widget-content{border:1px solid #d3d3d3}.ui-widget-content{border:1px solid #aaa;background:#fff;color:#222}.ui-widget-content a{color:#222}.ui-widget-header{border:1px solid #aaa;background:#ccc url("images/ui-bg_highlight-soft_75_cccccc_1x100.png") 50% 50% repeat-x;color:#222;font-weight:bold}.ui-widget-header a{color:#222}.ui-state-default,.ui-widget-content .ui-state-default,.ui-widget-header .ui-state-default,.ui-button,html .ui-button.ui-state-disabled:hover,html .ui-button.ui-state-disabled:active{border:1px solid #d3d3d3;background:#e6e6e6 url("images/ui-bg_glass_75_e6e6e6_1x400.png") 50% 50% repeat-x;font-weight:normal;color:#555}.ui-state-default a,.ui-state-default a:link,.ui-state-default a:visited,a.ui-button,a:link.ui-button,a:visited.ui-button,.ui-button{color:#555;text-decoration:none}.ui-state-hover,.ui-widget-content .ui-state-hover,.ui-widget-header .ui-state-hover,.ui-state-focus,.ui-widget-content .ui-state-focus,.ui-widget-header .ui-state-focus,.ui-button:hover,.ui-button:focus{border:1px solid #999;background:#dadada url("images/ui-bg_glass_75_dadada_1x400.png") 50% 50% repeat-x;font-weight:normal;color:#212121}.ui-state-hover a,.ui-state-hover a:hover,.ui-state-hover a:link,.ui-state-hover a:visited,.ui-state-focus a,.ui-state-focus a:hover,.ui-state-focus a:link,.ui-state-focus a:visited,a.ui-button:hover,a.ui-button:focus{color:#212121;text-decoration:none}.ui-visual-focus{box-shadow:0 0 3px 1px rgb(94,158,214)}.ui-state-active,.ui-widget-content .ui-state-active,.ui-widget-header .ui-state-active,a.ui-button:active,.ui-button:active,.ui-button.ui-state-active:hover{border:1px solid #aaa;background:#dadada url("images/ui-bg_glass_65_dadada_1x400.png") 50% 50% 
repeat-x;font-weight:normal;color:#212121}.ui-icon-background,.ui-state-active .ui-icon-background{border:#aaa;background-color:#212121}.ui-state-active a,.ui-state-active a:link,.ui-state-active a:visited{color:#212121;text-decoration:none}.ui-state-highlight,.ui-widget-content .ui-state-highlight,.ui-widget-header .ui-state-highlight{border:1px solid #fcefa1;background:#fbf9ee url("images/ui-bg_glass_55_fbf9ee_1x400.png") 50% 50% repeat-x;color:#363636}.ui-state-checked{border:1px solid #fcefa1;background:#fbf9ee}.ui-state-highlight a,.ui-widget-content .ui-state-highlight a,.ui-widget-header .ui-state-highlight a{color:#363636}.ui-state-error,.ui-widget-content .ui-state-error,.ui-widget-header .ui-state-error{border:1px solid #cd0a0a;background:#fef1ec url("images/ui-bg_glass_95_fef1ec_1x400.png") 50% 50% repeat-x;color:#cd0a0a}.ui-state-error a,.ui-widget-content .ui-state-error a,.ui-widget-header .ui-state-error a{color:#cd0a0a}.ui-state-error-text,.ui-widget-content .ui-state-error-text,.ui-widget-header .ui-state-error-text{color:#cd0a0a}.ui-priority-primary,.ui-widget-content .ui-priority-primary,.ui-widget-header .ui-priority-primary{font-weight:bold}.ui-priority-secondary,.ui-widget-content .ui-priority-secondary,.ui-widget-header .ui-priority-secondary{opacity:.7;filter:Alpha(Opacity=70);font-weight:normal}.ui-state-disabled,.ui-widget-content .ui-state-disabled,.ui-widget-header .ui-state-disabled{opacity:.35;filter:Alpha(Opacity=35);background-image:none}.ui-state-disabled .ui-icon{filter:Alpha(Opacity=35)}.ui-icon{width:16px;height:16px}.ui-icon,.ui-widget-content .ui-icon{background-image:url("images/ui-icons_222222_256x240.png")}.ui-widget-header .ui-icon{background-image:url("images/ui-icons_222222_256x240.png")}.ui-state-hover .ui-icon,.ui-state-focus .ui-icon,.ui-button:hover .ui-icon,.ui-button:focus .ui-icon{background-image:url("images/ui-icons_454545_256x240.png")}.ui-state-active .ui-icon,.ui-button:active .ui-icon{background-image:url("images/ui-icons_454545_256x240.png")}.ui-state-highlight .ui-icon,.ui-button .ui-state-highlight.ui-icon{background-image:url("images/ui-icons_2e83ff_256x240.png")}.ui-state-error .ui-icon,.ui-state-error-text .ui-icon{background-image:url("images/ui-icons_cd0a0a_256x240.png")}.ui-button .ui-icon{background-image:url("images/ui-icons_888888_256x240.png")}.ui-icon-blank{background-position:16px 16px}.ui-icon-caret-1-n{background-position:0 0}.ui-icon-caret-1-ne{background-position:-16px 0}.ui-icon-caret-1-e{background-position:-32px 0}.ui-icon-caret-1-se{background-position:-48px 0}.ui-icon-caret-1-s{background-position:-65px 0}.ui-icon-caret-1-sw{background-position:-80px 0}.ui-icon-caret-1-w{background-position:-96px 0}.ui-icon-caret-1-nw{background-position:-112px 0}.ui-icon-caret-2-n-s{background-position:-128px 0}.ui-icon-caret-2-e-w{background-position:-144px 0}.ui-icon-triangle-1-n{background-position:0 -16px}.ui-icon-triangle-1-ne{background-position:-16px -16px}.ui-icon-triangle-1-e{background-position:-32px -16px}.ui-icon-triangle-1-se{background-position:-48px -16px}.ui-icon-triangle-1-s{background-position:-65px -16px}.ui-icon-triangle-1-sw{background-position:-80px -16px}.ui-icon-triangle-1-w{background-position:-96px -16px}.ui-icon-triangle-1-nw{background-position:-112px -16px}.ui-icon-triangle-2-n-s{background-position:-128px -16px}.ui-icon-triangle-2-e-w{background-position:-144px -16px}.ui-icon-arrow-1-n{background-position:0 -32px}.ui-icon-arrow-1-ne{background-position:-16px 
-32px}.ui-icon-arrow-1-e{background-position:-32px -32px}.ui-icon-arrow-1-se{background-position:-48px -32px}.ui-icon-arrow-1-s{background-position:-65px -32px}.ui-icon-arrow-1-sw{background-position:-80px -32px}.ui-icon-arrow-1-w{background-position:-96px -32px}.ui-icon-arrow-1-nw{background-position:-112px -32px}.ui-icon-arrow-2-n-s{background-position:-128px -32px}.ui-icon-arrow-2-ne-sw{background-position:-144px -32px}.ui-icon-arrow-2-e-w{background-position:-160px -32px}.ui-icon-arrow-2-se-nw{background-position:-176px -32px}.ui-icon-arrowstop-1-n{background-position:-192px -32px}.ui-icon-arrowstop-1-e{background-position:-208px -32px}.ui-icon-arrowstop-1-s{background-position:-224px -32px}.ui-icon-arrowstop-1-w{background-position:-240px -32px}.ui-icon-arrowthick-1-n{background-position:1px -48px}.ui-icon-arrowthick-1-ne{background-position:-16px -48px}.ui-icon-arrowthick-1-e{background-position:-32px -48px}.ui-icon-arrowthick-1-se{background-position:-48px -48px}.ui-icon-arrowthick-1-s{background-position:-64px -48px}.ui-icon-arrowthick-1-sw{background-position:-80px -48px}.ui-icon-arrowthick-1-w{background-position:-96px -48px}.ui-icon-arrowthick-1-nw{background-position:-112px -48px}.ui-icon-arrowthick-2-n-s{background-position:-128px -48px}.ui-icon-arrowthick-2-ne-sw{background-position:-144px -48px}.ui-icon-arrowthick-2-e-w{background-position:-160px -48px}.ui-icon-arrowthick-2-se-nw{background-position:-176px -48px}.ui-icon-arrowthickstop-1-n{background-position:-192px -48px}.ui-icon-arrowthickstop-1-e{background-position:-208px -48px}.ui-icon-arrowthickstop-1-s{background-position:-224px -48px}.ui-icon-arrowthickstop-1-w{background-position:-240px -48px}.ui-icon-arrowreturnthick-1-w{background-position:0 -64px}.ui-icon-arrowreturnthick-1-n{background-position:-16px -64px}.ui-icon-arrowreturnthick-1-e{background-position:-32px -64px}.ui-icon-arrowreturnthick-1-s{background-position:-48px -64px}.ui-icon-arrowreturn-1-w{background-position:-64px -64px}.ui-icon-arrowreturn-1-n{background-position:-80px -64px}.ui-icon-arrowreturn-1-e{background-position:-96px -64px}.ui-icon-arrowreturn-1-s{background-position:-112px -64px}.ui-icon-arrowrefresh-1-w{background-position:-128px -64px}.ui-icon-arrowrefresh-1-n{background-position:-144px -64px}.ui-icon-arrowrefresh-1-e{background-position:-160px -64px}.ui-icon-arrowrefresh-1-s{background-position:-176px -64px}.ui-icon-arrow-4{background-position:0 -80px}.ui-icon-arrow-4-diag{background-position:-16px -80px}.ui-icon-extlink{background-position:-32px -80px}.ui-icon-newwin{background-position:-48px -80px}.ui-icon-refresh{background-position:-64px -80px}.ui-icon-shuffle{background-position:-80px -80px}.ui-icon-transfer-e-w{background-position:-96px -80px}.ui-icon-transferthick-e-w{background-position:-112px -80px}.ui-icon-folder-collapsed{background-position:0 -96px}.ui-icon-folder-open{background-position:-16px -96px}.ui-icon-document{background-position:-32px -96px}.ui-icon-document-b{background-position:-48px -96px}.ui-icon-note{background-position:-64px -96px}.ui-icon-mail-closed{background-position:-80px -96px}.ui-icon-mail-open{background-position:-96px -96px}.ui-icon-suitcase{background-position:-112px -96px}.ui-icon-comment{background-position:-128px -96px}.ui-icon-person{background-position:-144px -96px}.ui-icon-print{background-position:-160px -96px}.ui-icon-trash{background-position:-176px -96px}.ui-icon-locked{background-position:-192px -96px}.ui-icon-unlocked{background-position:-208px 
-96px}.ui-icon-bookmark{background-position:-224px -96px}.ui-icon-tag{background-position:-240px -96px}.ui-icon-home{background-position:0 -112px}.ui-icon-flag{background-position:-16px -112px}.ui-icon-calendar{background-position:-32px -112px}.ui-icon-cart{background-position:-48px -112px}.ui-icon-pencil{background-position:-64px -112px}.ui-icon-clock{background-position:-80px -112px}.ui-icon-disk{background-position:-96px -112px}.ui-icon-calculator{background-position:-112px -112px}.ui-icon-zoomin{background-position:-128px -112px}.ui-icon-zoomout{background-position:-144px -112px}.ui-icon-search{background-position:-160px -112px}.ui-icon-wrench{background-position:-176px -112px}.ui-icon-gear{background-position:-192px -112px}.ui-icon-heart{background-position:-208px -112px}.ui-icon-star{background-position:-224px -112px}.ui-icon-link{background-position:-240px -112px}.ui-icon-cancel{background-position:0 -128px}.ui-icon-plus{background-position:-16px -128px}.ui-icon-plusthick{background-position:-32px -128px}.ui-icon-minus{background-position:-48px -128px}.ui-icon-minusthick{background-position:-64px -128px}.ui-icon-close{background-position:-80px -128px}.ui-icon-closethick{background-position:-96px -128px}.ui-icon-key{background-position:-112px -128px}.ui-icon-lightbulb{background-position:-128px -128px}.ui-icon-scissors{background-position:-144px -128px}.ui-icon-clipboard{background-position:-160px -128px}.ui-icon-copy{background-position:-176px -128px}.ui-icon-contact{background-position:-192px -128px}.ui-icon-image{background-position:-208px -128px}.ui-icon-video{background-position:-224px -128px}.ui-icon-script{background-position:-240px -128px}.ui-icon-alert{background-position:0 -144px}.ui-icon-info{background-position:-16px -144px}.ui-icon-notice{background-position:-32px -144px}.ui-icon-help{background-position:-48px -144px}.ui-icon-check{background-position:-64px -144px}.ui-icon-bullet{background-position:-80px -144px}.ui-icon-radio-on{background-position:-96px -144px}.ui-icon-radio-off{background-position:-112px -144px}.ui-icon-pin-w{background-position:-128px -144px}.ui-icon-pin-s{background-position:-144px -144px}.ui-icon-play{background-position:0 -160px}.ui-icon-pause{background-position:-16px -160px}.ui-icon-seek-next{background-position:-32px -160px}.ui-icon-seek-prev{background-position:-48px -160px}.ui-icon-seek-end{background-position:-64px -160px}.ui-icon-seek-start{background-position:-80px -160px}.ui-icon-seek-first{background-position:-80px -160px}.ui-icon-stop{background-position:-96px -160px}.ui-icon-eject{background-position:-112px -160px}.ui-icon-volume-off{background-position:-128px -160px}.ui-icon-volume-on{background-position:-144px -160px}.ui-icon-power{background-position:0 -176px}.ui-icon-signal-diag{background-position:-16px -176px}.ui-icon-signal{background-position:-32px -176px}.ui-icon-battery-0{background-position:-48px -176px}.ui-icon-battery-1{background-position:-64px -176px}.ui-icon-battery-2{background-position:-80px -176px}.ui-icon-battery-3{background-position:-96px -176px}.ui-icon-circle-plus{background-position:0 -192px}.ui-icon-circle-minus{background-position:-16px -192px}.ui-icon-circle-close{background-position:-32px -192px}.ui-icon-circle-triangle-e{background-position:-48px -192px}.ui-icon-circle-triangle-s{background-position:-64px -192px}.ui-icon-circle-triangle-w{background-position:-80px -192px}.ui-icon-circle-triangle-n{background-position:-96px -192px}.ui-icon-circle-arrow-e{background-position:-112px 
-192px}.ui-icon-circle-arrow-s{background-position:-128px -192px}.ui-icon-circle-arrow-w{background-position:-144px -192px}.ui-icon-circle-arrow-n{background-position:-160px -192px}.ui-icon-circle-zoomin{background-position:-176px -192px}.ui-icon-circle-zoomout{background-position:-192px -192px}.ui-icon-circle-check{background-position:-208px -192px}.ui-icon-circlesmall-plus{background-position:0 -208px}.ui-icon-circlesmall-minus{background-position:-16px -208px}.ui-icon-circlesmall-close{background-position:-32px -208px}.ui-icon-squaresmall-plus{background-position:-48px -208px}.ui-icon-squaresmall-minus{background-position:-64px -208px}.ui-icon-squaresmall-close{background-position:-80px -208px}.ui-icon-grip-dotted-vertical{background-position:0 -224px}.ui-icon-grip-dotted-horizontal{background-position:-16px -224px}.ui-icon-grip-solid-vertical{background-position:-32px -224px}.ui-icon-grip-solid-horizontal{background-position:-48px -224px}.ui-icon-gripsmall-diagonal-se{background-position:-64px -224px}.ui-icon-grip-diagonal-se{background-position:-80px -224px}.ui-corner-all,.ui-corner-top,.ui-corner-left,.ui-corner-tl{border-top-left-radius:4px}.ui-corner-all,.ui-corner-top,.ui-corner-right,.ui-corner-tr{border-top-right-radius:4px}.ui-corner-all,.ui-corner-bottom,.ui-corner-left,.ui-corner-bl{border-bottom-left-radius:4px}.ui-corner-all,.ui-corner-bottom,.ui-corner-right,.ui-corner-br{border-bottom-right-radius:4px}.ui-widget-overlay{background:#aaa;opacity:.3;filter:Alpha(Opacity=30)}.ui-widget-shadow{-webkit-box-shadow:-8px -8px 8px #aaa;box-shadow:-8px -8px 8px #aaa} \ No newline at end of file +.ui-helper-hidden{display:none}.ui-helper-hidden-accessible{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.ui-helper-reset{margin:0;padding:0;border:0;outline:0;line-height:1.3;text-decoration:none;font-size:100%;list-style:none}.ui-helper-clearfix:before,.ui-helper-clearfix:after{content:"";display:table;border-collapse:collapse}.ui-helper-clearfix:after{clear:both}.ui-helper-zfix{width:100%;height:100%;top:0;left:0;position:absolute;opacity:0;filter:Alpha(Opacity=0)}.ui-front{z-index:100}.ui-state-disabled{cursor:default!important;pointer-events:none}.ui-icon{display:inline-block;vertical-align:middle;margin-top:-.25em;position:relative;text-indent:-99999px;overflow:hidden;background-repeat:no-repeat}.ui-widget-icon-block{left:50%;margin-left:-8px;display:block}.ui-widget-overlay{position:fixed;top:0;left:0;width:100%;height:100%}.ui-autocomplete{position:absolute;top:0;left:0;cursor:default}.ui-menu{list-style:none;padding:0;margin:0;display:block;outline:0}.ui-menu .ui-menu{position:absolute}.ui-menu .ui-menu-item{margin:0;cursor:pointer;list-style-image:url("data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7")}.ui-menu .ui-menu-item-wrapper{position:relative;padding:3px 1em 3px .4em}.ui-menu .ui-menu-divider{margin:5px 0;height:0;font-size:0;line-height:0;border-width:1px 0 0 0}.ui-menu .ui-state-focus,.ui-menu .ui-state-active{margin:-1px}.ui-menu-icons{position:relative}.ui-menu-icons .ui-menu-item-wrapper{padding-left:2em}.ui-menu .ui-icon{position:absolute;top:0;bottom:0;left:.2em;margin:auto 0}.ui-menu .ui-menu-icon{left:auto;right:0}.ui-widget{font-family:Verdana,Arial,sans-serif;font-size:1.1em}.ui-widget .ui-widget{font-size:1em}.ui-widget input,.ui-widget select,.ui-widget textarea,.ui-widget button{font-family:Verdana,Arial,sans-serif;font-size:1em}.ui-widget.ui-widget-content{border:1px 
solid #d3d3d3}.ui-widget-content{border:1px solid #aaa;background:#fff;color:#222}.ui-widget-content a{color:#222}.ui-widget-header{border:1px solid #aaa;background:#ccc url("images/ui-bg_highlight-soft_75_cccccc_1x100.png") 50% 50% repeat-x;color:#222;font-weight:bold}.ui-widget-header a{color:#222}.ui-state-default,.ui-widget-content .ui-state-default,.ui-widget-header .ui-state-default,.ui-button,html .ui-button.ui-state-disabled:hover,html .ui-button.ui-state-disabled:active{border:1px solid #d3d3d3;background:#e6e6e6 url("images/ui-bg_glass_75_e6e6e6_1x400.png") 50% 50% repeat-x;font-weight:normal;color:#555}.ui-state-default a,.ui-state-default a:link,.ui-state-default a:visited,a.ui-button,a:link.ui-button,a:visited.ui-button,.ui-button{color:#555;text-decoration:none}.ui-state-hover,.ui-widget-content .ui-state-hover,.ui-widget-header .ui-state-hover,.ui-state-focus,.ui-widget-content .ui-state-focus,.ui-widget-header .ui-state-focus,.ui-button:hover,.ui-button:focus{border:1px solid #999;background:#dadada url("images/ui-bg_glass_75_dadada_1x400.png") 50% 50% repeat-x;font-weight:normal;color:#212121}.ui-state-hover a,.ui-state-hover a:hover,.ui-state-hover a:link,.ui-state-hover a:visited,.ui-state-focus a,.ui-state-focus a:hover,.ui-state-focus a:link,.ui-state-focus a:visited,a.ui-button:hover,a.ui-button:focus{color:#212121;text-decoration:none}.ui-visual-focus{box-shadow:0 0 3px 1px rgb(94,158,214)}.ui-state-active,.ui-widget-content .ui-state-active,.ui-widget-header .ui-state-active,a.ui-button:active,.ui-button:active,.ui-button.ui-state-active:hover{border:1px solid #aaa;background:#dadada url("images/ui-bg_glass_65_dadada_1x400.png") 50% 50% repeat-x;font-weight:normal;color:#212121}.ui-icon-background,.ui-state-active .ui-icon-background{border:#aaa;background-color:#212121}.ui-state-active a,.ui-state-active a:link,.ui-state-active a:visited{color:#212121;text-decoration:none}.ui-state-highlight,.ui-widget-content .ui-state-highlight,.ui-widget-header .ui-state-highlight{border:1px solid #fcefa1;background:#fbf9ee url("images/ui-bg_glass_55_fbf9ee_1x400.png") 50% 50% repeat-x;color:#363636}.ui-state-checked{border:1px solid #fcefa1;background:#fbf9ee}.ui-state-highlight a,.ui-widget-content .ui-state-highlight a,.ui-widget-header .ui-state-highlight a{color:#363636}.ui-state-error,.ui-widget-content .ui-state-error,.ui-widget-header .ui-state-error{border:1px solid #cd0a0a;background:#fef1ec url("images/ui-bg_glass_95_fef1ec_1x400.png") 50% 50% repeat-x;color:#cd0a0a}.ui-state-error a,.ui-widget-content .ui-state-error a,.ui-widget-header .ui-state-error a{color:#cd0a0a}.ui-state-error-text,.ui-widget-content .ui-state-error-text,.ui-widget-header .ui-state-error-text{color:#cd0a0a}.ui-priority-primary,.ui-widget-content .ui-priority-primary,.ui-widget-header .ui-priority-primary{font-weight:bold}.ui-priority-secondary,.ui-widget-content .ui-priority-secondary,.ui-widget-header .ui-priority-secondary{opacity:.7;filter:Alpha(Opacity=70);font-weight:normal}.ui-state-disabled,.ui-widget-content .ui-state-disabled,.ui-widget-header .ui-state-disabled{opacity:.35;filter:Alpha(Opacity=35);background-image:none}.ui-state-disabled .ui-icon{filter:Alpha(Opacity=35)}.ui-icon{width:16px;height:16px}.ui-icon,.ui-widget-content .ui-icon{background-image:url("images/ui-icons_222222_256x240.png")}.ui-widget-header .ui-icon{background-image:url("images/ui-icons_222222_256x240.png")}.ui-state-hover .ui-icon,.ui-state-focus .ui-icon,.ui-button:hover .ui-icon,.ui-button:focus 
.ui-icon{background-image:url("images/ui-icons_454545_256x240.png")}.ui-state-active .ui-icon,.ui-button:active .ui-icon{background-image:url("images/ui-icons_454545_256x240.png")}.ui-state-highlight .ui-icon,.ui-button .ui-state-highlight.ui-icon{background-image:url("images/ui-icons_2e83ff_256x240.png")}.ui-state-error .ui-icon,.ui-state-error-text .ui-icon{background-image:url("images/ui-icons_cd0a0a_256x240.png")}.ui-button .ui-icon{background-image:url("images/ui-icons_888888_256x240.png")}.ui-icon-blank{background-position:16px 16px}.ui-icon-caret-1-n{background-position:0 0}.ui-icon-caret-1-ne{background-position:-16px 0}.ui-icon-caret-1-e{background-position:-32px 0}.ui-icon-caret-1-se{background-position:-48px 0}.ui-icon-caret-1-s{background-position:-65px 0}.ui-icon-caret-1-sw{background-position:-80px 0}.ui-icon-caret-1-w{background-position:-96px 0}.ui-icon-caret-1-nw{background-position:-112px 0}.ui-icon-caret-2-n-s{background-position:-128px 0}.ui-icon-caret-2-e-w{background-position:-144px 0}.ui-icon-triangle-1-n{background-position:0 -16px}.ui-icon-triangle-1-ne{background-position:-16px -16px}.ui-icon-triangle-1-e{background-position:-32px -16px}.ui-icon-triangle-1-se{background-position:-48px -16px}.ui-icon-triangle-1-s{background-position:-65px -16px}.ui-icon-triangle-1-sw{background-position:-80px -16px}.ui-icon-triangle-1-w{background-position:-96px -16px}.ui-icon-triangle-1-nw{background-position:-112px -16px}.ui-icon-triangle-2-n-s{background-position:-128px -16px}.ui-icon-triangle-2-e-w{background-position:-144px -16px}.ui-icon-arrow-1-n{background-position:0 -32px}.ui-icon-arrow-1-ne{background-position:-16px -32px}.ui-icon-arrow-1-e{background-position:-32px -32px}.ui-icon-arrow-1-se{background-position:-48px -32px}.ui-icon-arrow-1-s{background-position:-65px -32px}.ui-icon-arrow-1-sw{background-position:-80px -32px}.ui-icon-arrow-1-w{background-position:-96px -32px}.ui-icon-arrow-1-nw{background-position:-112px -32px}.ui-icon-arrow-2-n-s{background-position:-128px -32px}.ui-icon-arrow-2-ne-sw{background-position:-144px -32px}.ui-icon-arrow-2-e-w{background-position:-160px -32px}.ui-icon-arrow-2-se-nw{background-position:-176px -32px}.ui-icon-arrowstop-1-n{background-position:-192px -32px}.ui-icon-arrowstop-1-e{background-position:-208px -32px}.ui-icon-arrowstop-1-s{background-position:-224px -32px}.ui-icon-arrowstop-1-w{background-position:-240px -32px}.ui-icon-arrowthick-1-n{background-position:1px -48px}.ui-icon-arrowthick-1-ne{background-position:-16px -48px}.ui-icon-arrowthick-1-e{background-position:-32px -48px}.ui-icon-arrowthick-1-se{background-position:-48px -48px}.ui-icon-arrowthick-1-s{background-position:-64px -48px}.ui-icon-arrowthick-1-sw{background-position:-80px -48px}.ui-icon-arrowthick-1-w{background-position:-96px -48px}.ui-icon-arrowthick-1-nw{background-position:-112px -48px}.ui-icon-arrowthick-2-n-s{background-position:-128px -48px}.ui-icon-arrowthick-2-ne-sw{background-position:-144px -48px}.ui-icon-arrowthick-2-e-w{background-position:-160px -48px}.ui-icon-arrowthick-2-se-nw{background-position:-176px -48px}.ui-icon-arrowthickstop-1-n{background-position:-192px -48px}.ui-icon-arrowthickstop-1-e{background-position:-208px -48px}.ui-icon-arrowthickstop-1-s{background-position:-224px -48px}.ui-icon-arrowthickstop-1-w{background-position:-240px -48px}.ui-icon-arrowreturnthick-1-w{background-position:0 -64px}.ui-icon-arrowreturnthick-1-n{background-position:-16px -64px}.ui-icon-arrowreturnthick-1-e{background-position:-32px 
-64px}.ui-icon-arrowreturnthick-1-s{background-position:-48px -64px}.ui-icon-arrowreturn-1-w{background-position:-64px -64px}.ui-icon-arrowreturn-1-n{background-position:-80px -64px}.ui-icon-arrowreturn-1-e{background-position:-96px -64px}.ui-icon-arrowreturn-1-s{background-position:-112px -64px}.ui-icon-arrowrefresh-1-w{background-position:-128px -64px}.ui-icon-arrowrefresh-1-n{background-position:-144px -64px}.ui-icon-arrowrefresh-1-e{background-position:-160px -64px}.ui-icon-arrowrefresh-1-s{background-position:-176px -64px}.ui-icon-arrow-4{background-position:0 -80px}.ui-icon-arrow-4-diag{background-position:-16px -80px}.ui-icon-extlink{background-position:-32px -80px}.ui-icon-newwin{background-position:-48px -80px}.ui-icon-refresh{background-position:-64px -80px}.ui-icon-shuffle{background-position:-80px -80px}.ui-icon-transfer-e-w{background-position:-96px -80px}.ui-icon-transferthick-e-w{background-position:-112px -80px}.ui-icon-folder-collapsed{background-position:0 -96px}.ui-icon-folder-open{background-position:-16px -96px}.ui-icon-document{background-position:-32px -96px}.ui-icon-document-b{background-position:-48px -96px}.ui-icon-note{background-position:-64px -96px}.ui-icon-mail-closed{background-position:-80px -96px}.ui-icon-mail-open{background-position:-96px -96px}.ui-icon-suitcase{background-position:-112px -96px}.ui-icon-comment{background-position:-128px -96px}.ui-icon-person{background-position:-144px -96px}.ui-icon-print{background-position:-160px -96px}.ui-icon-trash{background-position:-176px -96px}.ui-icon-locked{background-position:-192px -96px}.ui-icon-unlocked{background-position:-208px -96px}.ui-icon-bookmark{background-position:-224px -96px}.ui-icon-tag{background-position:-240px -96px}.ui-icon-home{background-position:0 -112px}.ui-icon-flag{background-position:-16px -112px}.ui-icon-calendar{background-position:-32px -112px}.ui-icon-cart{background-position:-48px -112px}.ui-icon-pencil{background-position:-64px -112px}.ui-icon-clock{background-position:-80px -112px}.ui-icon-disk{background-position:-96px -112px}.ui-icon-calculator{background-position:-112px -112px}.ui-icon-zoomin{background-position:-128px -112px}.ui-icon-zoomout{background-position:-144px -112px}.ui-icon-search{background-position:-160px -112px}.ui-icon-wrench{background-position:-176px -112px}.ui-icon-gear{background-position:-192px -112px}.ui-icon-heart{background-position:-208px -112px}.ui-icon-star{background-position:-224px -112px}.ui-icon-link{background-position:-240px -112px}.ui-icon-cancel{background-position:0 -128px}.ui-icon-plus{background-position:-16px -128px}.ui-icon-plusthick{background-position:-32px -128px}.ui-icon-minus{background-position:-48px -128px}.ui-icon-minusthick{background-position:-64px -128px}.ui-icon-close{background-position:-80px -128px}.ui-icon-closethick{background-position:-96px -128px}.ui-icon-key{background-position:-112px -128px}.ui-icon-lightbulb{background-position:-128px -128px}.ui-icon-scissors{background-position:-144px -128px}.ui-icon-clipboard{background-position:-160px -128px}.ui-icon-copy{background-position:-176px -128px}.ui-icon-contact{background-position:-192px -128px}.ui-icon-image{background-position:-208px -128px}.ui-icon-video{background-position:-224px -128px}.ui-icon-script{background-position:-240px -128px}.ui-icon-alert{background-position:0 -144px}.ui-icon-info{background-position:-16px -144px}.ui-icon-notice{background-position:-32px -144px}.ui-icon-help{background-position:-48px -144px}.ui-icon-check{background-position:-64px 
-144px}.ui-icon-bullet{background-position:-80px -144px}.ui-icon-radio-on{background-position:-96px -144px}.ui-icon-radio-off{background-position:-112px -144px}.ui-icon-pin-w{background-position:-128px -144px}.ui-icon-pin-s{background-position:-144px -144px}.ui-icon-play{background-position:0 -160px}.ui-icon-pause{background-position:-16px -160px}.ui-icon-seek-next{background-position:-32px -160px}.ui-icon-seek-prev{background-position:-48px -160px}.ui-icon-seek-end{background-position:-64px -160px}.ui-icon-seek-start{background-position:-80px -160px}.ui-icon-seek-first{background-position:-80px -160px}.ui-icon-stop{background-position:-96px -160px}.ui-icon-eject{background-position:-112px -160px}.ui-icon-volume-off{background-position:-128px -160px}.ui-icon-volume-on{background-position:-144px -160px}.ui-icon-power{background-position:0 -176px}.ui-icon-signal-diag{background-position:-16px -176px}.ui-icon-signal{background-position:-32px -176px}.ui-icon-battery-0{background-position:-48px -176px}.ui-icon-battery-1{background-position:-64px -176px}.ui-icon-battery-2{background-position:-80px -176px}.ui-icon-battery-3{background-position:-96px -176px}.ui-icon-circle-plus{background-position:0 -192px}.ui-icon-circle-minus{background-position:-16px -192px}.ui-icon-circle-close{background-position:-32px -192px}.ui-icon-circle-triangle-e{background-position:-48px -192px}.ui-icon-circle-triangle-s{background-position:-64px -192px}.ui-icon-circle-triangle-w{background-position:-80px -192px}.ui-icon-circle-triangle-n{background-position:-96px -192px}.ui-icon-circle-arrow-e{background-position:-112px -192px}.ui-icon-circle-arrow-s{background-position:-128px -192px}.ui-icon-circle-arrow-w{background-position:-144px -192px}.ui-icon-circle-arrow-n{background-position:-160px -192px}.ui-icon-circle-zoomin{background-position:-176px -192px}.ui-icon-circle-zoomout{background-position:-192px -192px}.ui-icon-circle-check{background-position:-208px -192px}.ui-icon-circlesmall-plus{background-position:0 -208px}.ui-icon-circlesmall-minus{background-position:-16px -208px}.ui-icon-circlesmall-close{background-position:-32px -208px}.ui-icon-squaresmall-plus{background-position:-48px -208px}.ui-icon-squaresmall-minus{background-position:-64px -208px}.ui-icon-squaresmall-close{background-position:-80px -208px}.ui-icon-grip-dotted-vertical{background-position:0 -224px}.ui-icon-grip-dotted-horizontal{background-position:-16px -224px}.ui-icon-grip-solid-vertical{background-position:-32px -224px}.ui-icon-grip-solid-horizontal{background-position:-48px -224px}.ui-icon-gripsmall-diagonal-se{background-position:-64px -224px}.ui-icon-grip-diagonal-se{background-position:-80px -224px}.ui-corner-all,.ui-corner-top,.ui-corner-left,.ui-corner-tl{border-top-left-radius:4px}.ui-corner-all,.ui-corner-top,.ui-corner-right,.ui-corner-tr{border-top-right-radius:4px}.ui-corner-all,.ui-corner-bottom,.ui-corner-left,.ui-corner-bl{border-bottom-left-radius:4px}.ui-corner-all,.ui-corner-bottom,.ui-corner-right,.ui-corner-br{border-bottom-right-radius:4px}.ui-widget-overlay{background:#aaa;opacity:.3;filter:Alpha(Opacity=30)}.ui-widget-shadow{box-shadow:-8px -8px 8px #aaa} \ No newline at end of file diff --git a/javadoc/script-dir/jquery-ui.css b/javadoc/script-dir/jquery-ui.css index c4487b41cdbe..f23f3cc96537 100644 --- a/javadoc/script-dir/jquery-ui.css +++ b/javadoc/script-dir/jquery-ui.css @@ -577,6 +577,5 @@ a.ui-button:active, filter: Alpha(Opacity=30); /* support: IE8 */ } .ui-widget-shadow { - -webkit-box-shadow: -8px -8px 8px 
#aaaaaa; box-shadow: -8px -8px 8px #aaaaaa; } diff --git a/javadoc/script-dir/jquery-ui.min.css b/javadoc/script-dir/jquery-ui.min.css index 6be72f58c02d..4d4533fd2126 100644 --- a/javadoc/script-dir/jquery-ui.min.css +++ b/javadoc/script-dir/jquery-ui.min.css @@ -4,4 +4,4 @@ * To view and modify this theme, visit http://jqueryui.com/themeroller/?scope=&folderName=custom-theme&bgImgOpacityError=95&bgImgOpacityHighlight=55&bgImgOpacityActive=65&bgImgOpacityHover=75&bgImgOpacityDefault=75&bgImgOpacityContent=75&bgImgOpacityHeader=75&cornerRadiusShadow=8px&offsetLeftShadow=-8px&offsetTopShadow=-8px&thicknessShadow=8px&opacityShadow=30&bgImgOpacityShadow=0&bgTextureShadow=flat&bgColorShadow=%23aaaaaa&opacityOverlay=30&bgImgOpacityOverlay=0&bgTextureOverlay=flat&bgColorOverlay=%23aaaaaa&iconColorError=%23cd0a0a&fcError=%23cd0a0a&borderColorError=%23cd0a0a&bgTextureError=glass&bgColorError=%23fef1ec&iconColorHighlight=%232e83ff&fcHighlight=%23363636&borderColorHighlight=%23fcefa1&bgTextureHighlight=glass&bgColorHighlight=%23fbf9ee&iconColorActive=%23454545&fcActive=%23212121&borderColorActive=%23aaaaaa&bgTextureActive=glass&bgColorActive=%23dadada&iconColorHover=%23454545&fcHover=%23212121&borderColorHover=%23999999&bgTextureHover=glass&bgColorHover=%23dadada&iconColorDefault=%23888888&fcDefault=%23555555&borderColorDefault=%23d3d3d3&bgTextureDefault=glass&bgColorDefault=%23e6e6e6&iconColorContent=%23222222&fcContent=%23222222&borderColorContent=%23aaaaaa&bgTextureContent=flat&bgColorContent=%23ffffff&iconColorHeader=%23222222&fcHeader=%23222222&borderColorHeader=%23aaaaaa&bgTextureHeader=highlight_soft&bgColorHeader=%23cccccc&cornerRadius=4px&fwDefault=normal&fsDefault=1.1em&ffDefault=Verdana%2CArial%2Csans-serif * Copyright jQuery Foundation and other contributors; Licensed MIT */ -.ui-helper-hidden{display:none}.ui-helper-hidden-accessible{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.ui-helper-reset{margin:0;padding:0;border:0;outline:0;line-height:1.3;text-decoration:none;font-size:100%;list-style:none}.ui-helper-clearfix:before,.ui-helper-clearfix:after{content:"";display:table;border-collapse:collapse}.ui-helper-clearfix:after{clear:both}.ui-helper-zfix{width:100%;height:100%;top:0;left:0;position:absolute;opacity:0;filter:Alpha(Opacity=0)}.ui-front{z-index:100}.ui-state-disabled{cursor:default!important;pointer-events:none}.ui-icon{display:inline-block;vertical-align:middle;margin-top:-.25em;position:relative;text-indent:-99999px;overflow:hidden;background-repeat:no-repeat}.ui-widget-icon-block{left:50%;margin-left:-8px;display:block}.ui-widget-overlay{position:fixed;top:0;left:0;width:100%;height:100%}.ui-autocomplete{position:absolute;top:0;left:0;cursor:default}.ui-menu{list-style:none;padding:0;margin:0;display:block;outline:0}.ui-menu .ui-menu{position:absolute}.ui-menu .ui-menu-item{margin:0;cursor:pointer;list-style-image:url("data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7")}.ui-menu .ui-menu-item-wrapper{position:relative;padding:3px 1em 3px .4em}.ui-menu .ui-menu-divider{margin:5px 0;height:0;font-size:0;line-height:0;border-width:1px 0 0 0}.ui-menu .ui-state-focus,.ui-menu .ui-state-active{margin:-1px}.ui-menu-icons{position:relative}.ui-menu-icons .ui-menu-item-wrapper{padding-left:2em}.ui-menu .ui-icon{position:absolute;top:0;bottom:0;left:.2em;margin:auto 0}.ui-menu .ui-menu-icon{left:auto;right:0}.ui-widget{font-family:Verdana,Arial,sans-serif;font-size:1.1em}.ui-widget 
.ui-widget{font-size:1em}.ui-widget input,.ui-widget select,.ui-widget textarea,.ui-widget button{font-family:Verdana,Arial,sans-serif;font-size:1em}.ui-widget.ui-widget-content{border:1px solid #d3d3d3}.ui-widget-content{border:1px solid #aaa;background:#fff;color:#222}.ui-widget-content a{color:#222}.ui-widget-header{border:1px solid #aaa;background:#ccc url("images/ui-bg_highlight-soft_75_cccccc_1x100.png") 50% 50% repeat-x;color:#222;font-weight:bold}.ui-widget-header a{color:#222}.ui-state-default,.ui-widget-content .ui-state-default,.ui-widget-header .ui-state-default,.ui-button,html .ui-button.ui-state-disabled:hover,html .ui-button.ui-state-disabled:active{border:1px solid #d3d3d3;background:#e6e6e6 url("images/ui-bg_glass_75_e6e6e6_1x400.png") 50% 50% repeat-x;font-weight:normal;color:#555}.ui-state-default a,.ui-state-default a:link,.ui-state-default a:visited,a.ui-button,a:link.ui-button,a:visited.ui-button,.ui-button{color:#555;text-decoration:none}.ui-state-hover,.ui-widget-content .ui-state-hover,.ui-widget-header .ui-state-hover,.ui-state-focus,.ui-widget-content .ui-state-focus,.ui-widget-header .ui-state-focus,.ui-button:hover,.ui-button:focus{border:1px solid #999;background:#dadada url("images/ui-bg_glass_75_dadada_1x400.png") 50% 50% repeat-x;font-weight:normal;color:#212121}.ui-state-hover a,.ui-state-hover a:hover,.ui-state-hover a:link,.ui-state-hover a:visited,.ui-state-focus a,.ui-state-focus a:hover,.ui-state-focus a:link,.ui-state-focus a:visited,a.ui-button:hover,a.ui-button:focus{color:#212121;text-decoration:none}.ui-visual-focus{box-shadow:0 0 3px 1px rgb(94,158,214)}.ui-state-active,.ui-widget-content .ui-state-active,.ui-widget-header .ui-state-active,a.ui-button:active,.ui-button:active,.ui-button.ui-state-active:hover{border:1px solid #aaa;background:#dadada url("images/ui-bg_glass_65_dadada_1x400.png") 50% 50% repeat-x;font-weight:normal;color:#212121}.ui-icon-background,.ui-state-active .ui-icon-background{border:#aaa;background-color:#212121}.ui-state-active a,.ui-state-active a:link,.ui-state-active a:visited{color:#212121;text-decoration:none}.ui-state-highlight,.ui-widget-content .ui-state-highlight,.ui-widget-header .ui-state-highlight{border:1px solid #fcefa1;background:#fbf9ee url("images/ui-bg_glass_55_fbf9ee_1x400.png") 50% 50% repeat-x;color:#363636}.ui-state-checked{border:1px solid #fcefa1;background:#fbf9ee}.ui-state-highlight a,.ui-widget-content .ui-state-highlight a,.ui-widget-header .ui-state-highlight a{color:#363636}.ui-state-error,.ui-widget-content .ui-state-error,.ui-widget-header .ui-state-error{border:1px solid #cd0a0a;background:#fef1ec url("images/ui-bg_glass_95_fef1ec_1x400.png") 50% 50% repeat-x;color:#cd0a0a}.ui-state-error a,.ui-widget-content .ui-state-error a,.ui-widget-header .ui-state-error a{color:#cd0a0a}.ui-state-error-text,.ui-widget-content .ui-state-error-text,.ui-widget-header .ui-state-error-text{color:#cd0a0a}.ui-priority-primary,.ui-widget-content .ui-priority-primary,.ui-widget-header .ui-priority-primary{font-weight:bold}.ui-priority-secondary,.ui-widget-content .ui-priority-secondary,.ui-widget-header .ui-priority-secondary{opacity:.7;filter:Alpha(Opacity=70);font-weight:normal}.ui-state-disabled,.ui-widget-content .ui-state-disabled,.ui-widget-header .ui-state-disabled{opacity:.35;filter:Alpha(Opacity=35);background-image:none}.ui-state-disabled .ui-icon{filter:Alpha(Opacity=35)}.ui-icon{width:16px;height:16px}.ui-icon,.ui-widget-content 
.ui-icon{background-image:url("images/ui-icons_222222_256x240.png")}.ui-widget-header .ui-icon{background-image:url("images/ui-icons_222222_256x240.png")}.ui-state-hover .ui-icon,.ui-state-focus .ui-icon,.ui-button:hover .ui-icon,.ui-button:focus .ui-icon{background-image:url("images/ui-icons_454545_256x240.png")}.ui-state-active .ui-icon,.ui-button:active .ui-icon{background-image:url("images/ui-icons_454545_256x240.png")}.ui-state-highlight .ui-icon,.ui-button .ui-state-highlight.ui-icon{background-image:url("images/ui-icons_2e83ff_256x240.png")}.ui-state-error .ui-icon,.ui-state-error-text .ui-icon{background-image:url("images/ui-icons_cd0a0a_256x240.png")}.ui-button .ui-icon{background-image:url("images/ui-icons_888888_256x240.png")}.ui-icon-blank{background-position:16px 16px}.ui-icon-caret-1-n{background-position:0 0}.ui-icon-caret-1-ne{background-position:-16px 0}.ui-icon-caret-1-e{background-position:-32px 0}.ui-icon-caret-1-se{background-position:-48px 0}.ui-icon-caret-1-s{background-position:-65px 0}.ui-icon-caret-1-sw{background-position:-80px 0}.ui-icon-caret-1-w{background-position:-96px 0}.ui-icon-caret-1-nw{background-position:-112px 0}.ui-icon-caret-2-n-s{background-position:-128px 0}.ui-icon-caret-2-e-w{background-position:-144px 0}.ui-icon-triangle-1-n{background-position:0 -16px}.ui-icon-triangle-1-ne{background-position:-16px -16px}.ui-icon-triangle-1-e{background-position:-32px -16px}.ui-icon-triangle-1-se{background-position:-48px -16px}.ui-icon-triangle-1-s{background-position:-65px -16px}.ui-icon-triangle-1-sw{background-position:-80px -16px}.ui-icon-triangle-1-w{background-position:-96px -16px}.ui-icon-triangle-1-nw{background-position:-112px -16px}.ui-icon-triangle-2-n-s{background-position:-128px -16px}.ui-icon-triangle-2-e-w{background-position:-144px -16px}.ui-icon-arrow-1-n{background-position:0 -32px}.ui-icon-arrow-1-ne{background-position:-16px -32px}.ui-icon-arrow-1-e{background-position:-32px -32px}.ui-icon-arrow-1-se{background-position:-48px -32px}.ui-icon-arrow-1-s{background-position:-65px -32px}.ui-icon-arrow-1-sw{background-position:-80px -32px}.ui-icon-arrow-1-w{background-position:-96px -32px}.ui-icon-arrow-1-nw{background-position:-112px -32px}.ui-icon-arrow-2-n-s{background-position:-128px -32px}.ui-icon-arrow-2-ne-sw{background-position:-144px -32px}.ui-icon-arrow-2-e-w{background-position:-160px -32px}.ui-icon-arrow-2-se-nw{background-position:-176px -32px}.ui-icon-arrowstop-1-n{background-position:-192px -32px}.ui-icon-arrowstop-1-e{background-position:-208px -32px}.ui-icon-arrowstop-1-s{background-position:-224px -32px}.ui-icon-arrowstop-1-w{background-position:-240px -32px}.ui-icon-arrowthick-1-n{background-position:1px -48px}.ui-icon-arrowthick-1-ne{background-position:-16px -48px}.ui-icon-arrowthick-1-e{background-position:-32px -48px}.ui-icon-arrowthick-1-se{background-position:-48px -48px}.ui-icon-arrowthick-1-s{background-position:-64px -48px}.ui-icon-arrowthick-1-sw{background-position:-80px -48px}.ui-icon-arrowthick-1-w{background-position:-96px -48px}.ui-icon-arrowthick-1-nw{background-position:-112px -48px}.ui-icon-arrowthick-2-n-s{background-position:-128px -48px}.ui-icon-arrowthick-2-ne-sw{background-position:-144px -48px}.ui-icon-arrowthick-2-e-w{background-position:-160px -48px}.ui-icon-arrowthick-2-se-nw{background-position:-176px -48px}.ui-icon-arrowthickstop-1-n{background-position:-192px -48px}.ui-icon-arrowthickstop-1-e{background-position:-208px -48px}.ui-icon-arrowthickstop-1-s{background-position:-224px 
-48px}.ui-icon-arrowthickstop-1-w{background-position:-240px -48px}.ui-icon-arrowreturnthick-1-w{background-position:0 -64px}.ui-icon-arrowreturnthick-1-n{background-position:-16px -64px}.ui-icon-arrowreturnthick-1-e{background-position:-32px -64px}.ui-icon-arrowreturnthick-1-s{background-position:-48px -64px}.ui-icon-arrowreturn-1-w{background-position:-64px -64px}.ui-icon-arrowreturn-1-n{background-position:-80px -64px}.ui-icon-arrowreturn-1-e{background-position:-96px -64px}.ui-icon-arrowreturn-1-s{background-position:-112px -64px}.ui-icon-arrowrefresh-1-w{background-position:-128px -64px}.ui-icon-arrowrefresh-1-n{background-position:-144px -64px}.ui-icon-arrowrefresh-1-e{background-position:-160px -64px}.ui-icon-arrowrefresh-1-s{background-position:-176px -64px}.ui-icon-arrow-4{background-position:0 -80px}.ui-icon-arrow-4-diag{background-position:-16px -80px}.ui-icon-extlink{background-position:-32px -80px}.ui-icon-newwin{background-position:-48px -80px}.ui-icon-refresh{background-position:-64px -80px}.ui-icon-shuffle{background-position:-80px -80px}.ui-icon-transfer-e-w{background-position:-96px -80px}.ui-icon-transferthick-e-w{background-position:-112px -80px}.ui-icon-folder-collapsed{background-position:0 -96px}.ui-icon-folder-open{background-position:-16px -96px}.ui-icon-document{background-position:-32px -96px}.ui-icon-document-b{background-position:-48px -96px}.ui-icon-note{background-position:-64px -96px}.ui-icon-mail-closed{background-position:-80px -96px}.ui-icon-mail-open{background-position:-96px -96px}.ui-icon-suitcase{background-position:-112px -96px}.ui-icon-comment{background-position:-128px -96px}.ui-icon-person{background-position:-144px -96px}.ui-icon-print{background-position:-160px -96px}.ui-icon-trash{background-position:-176px -96px}.ui-icon-locked{background-position:-192px -96px}.ui-icon-unlocked{background-position:-208px -96px}.ui-icon-bookmark{background-position:-224px -96px}.ui-icon-tag{background-position:-240px -96px}.ui-icon-home{background-position:0 -112px}.ui-icon-flag{background-position:-16px -112px}.ui-icon-calendar{background-position:-32px -112px}.ui-icon-cart{background-position:-48px -112px}.ui-icon-pencil{background-position:-64px -112px}.ui-icon-clock{background-position:-80px -112px}.ui-icon-disk{background-position:-96px -112px}.ui-icon-calculator{background-position:-112px -112px}.ui-icon-zoomin{background-position:-128px -112px}.ui-icon-zoomout{background-position:-144px -112px}.ui-icon-search{background-position:-160px -112px}.ui-icon-wrench{background-position:-176px -112px}.ui-icon-gear{background-position:-192px -112px}.ui-icon-heart{background-position:-208px -112px}.ui-icon-star{background-position:-224px -112px}.ui-icon-link{background-position:-240px -112px}.ui-icon-cancel{background-position:0 -128px}.ui-icon-plus{background-position:-16px -128px}.ui-icon-plusthick{background-position:-32px -128px}.ui-icon-minus{background-position:-48px -128px}.ui-icon-minusthick{background-position:-64px -128px}.ui-icon-close{background-position:-80px -128px}.ui-icon-closethick{background-position:-96px -128px}.ui-icon-key{background-position:-112px -128px}.ui-icon-lightbulb{background-position:-128px -128px}.ui-icon-scissors{background-position:-144px -128px}.ui-icon-clipboard{background-position:-160px -128px}.ui-icon-copy{background-position:-176px -128px}.ui-icon-contact{background-position:-192px -128px}.ui-icon-image{background-position:-208px -128px}.ui-icon-video{background-position:-224px 
-128px}.ui-icon-script{background-position:-240px -128px}.ui-icon-alert{background-position:0 -144px}.ui-icon-info{background-position:-16px -144px}.ui-icon-notice{background-position:-32px -144px}.ui-icon-help{background-position:-48px -144px}.ui-icon-check{background-position:-64px -144px}.ui-icon-bullet{background-position:-80px -144px}.ui-icon-radio-on{background-position:-96px -144px}.ui-icon-radio-off{background-position:-112px -144px}.ui-icon-pin-w{background-position:-128px -144px}.ui-icon-pin-s{background-position:-144px -144px}.ui-icon-play{background-position:0 -160px}.ui-icon-pause{background-position:-16px -160px}.ui-icon-seek-next{background-position:-32px -160px}.ui-icon-seek-prev{background-position:-48px -160px}.ui-icon-seek-end{background-position:-64px -160px}.ui-icon-seek-start{background-position:-80px -160px}.ui-icon-seek-first{background-position:-80px -160px}.ui-icon-stop{background-position:-96px -160px}.ui-icon-eject{background-position:-112px -160px}.ui-icon-volume-off{background-position:-128px -160px}.ui-icon-volume-on{background-position:-144px -160px}.ui-icon-power{background-position:0 -176px}.ui-icon-signal-diag{background-position:-16px -176px}.ui-icon-signal{background-position:-32px -176px}.ui-icon-battery-0{background-position:-48px -176px}.ui-icon-battery-1{background-position:-64px -176px}.ui-icon-battery-2{background-position:-80px -176px}.ui-icon-battery-3{background-position:-96px -176px}.ui-icon-circle-plus{background-position:0 -192px}.ui-icon-circle-minus{background-position:-16px -192px}.ui-icon-circle-close{background-position:-32px -192px}.ui-icon-circle-triangle-e{background-position:-48px -192px}.ui-icon-circle-triangle-s{background-position:-64px -192px}.ui-icon-circle-triangle-w{background-position:-80px -192px}.ui-icon-circle-triangle-n{background-position:-96px -192px}.ui-icon-circle-arrow-e{background-position:-112px -192px}.ui-icon-circle-arrow-s{background-position:-128px -192px}.ui-icon-circle-arrow-w{background-position:-144px -192px}.ui-icon-circle-arrow-n{background-position:-160px -192px}.ui-icon-circle-zoomin{background-position:-176px -192px}.ui-icon-circle-zoomout{background-position:-192px -192px}.ui-icon-circle-check{background-position:-208px -192px}.ui-icon-circlesmall-plus{background-position:0 -208px}.ui-icon-circlesmall-minus{background-position:-16px -208px}.ui-icon-circlesmall-close{background-position:-32px -208px}.ui-icon-squaresmall-plus{background-position:-48px -208px}.ui-icon-squaresmall-minus{background-position:-64px -208px}.ui-icon-squaresmall-close{background-position:-80px -208px}.ui-icon-grip-dotted-vertical{background-position:0 -224px}.ui-icon-grip-dotted-horizontal{background-position:-16px -224px}.ui-icon-grip-solid-vertical{background-position:-32px -224px}.ui-icon-grip-solid-horizontal{background-position:-48px -224px}.ui-icon-gripsmall-diagonal-se{background-position:-64px -224px}.ui-icon-grip-diagonal-se{background-position:-80px -224px}.ui-corner-all,.ui-corner-top,.ui-corner-left,.ui-corner-tl{border-top-left-radius:4px}.ui-corner-all,.ui-corner-top,.ui-corner-right,.ui-corner-tr{border-top-right-radius:4px}.ui-corner-all,.ui-corner-bottom,.ui-corner-left,.ui-corner-bl{border-bottom-left-radius:4px}.ui-corner-all,.ui-corner-bottom,.ui-corner-right,.ui-corner-br{border-bottom-right-radius:4px}.ui-widget-overlay{background:#aaa;opacity:.3;filter:Alpha(Opacity=30)}.ui-widget-shadow{-webkit-box-shadow:-8px -8px 8px #aaa;box-shadow:-8px -8px 8px #aaa} \ No newline at end of file 
+.ui-helper-hidden{display:none}.ui-helper-hidden-accessible{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.ui-helper-reset{margin:0;padding:0;border:0;outline:0;line-height:1.3;text-decoration:none;font-size:100%;list-style:none}.ui-helper-clearfix:before,.ui-helper-clearfix:after{content:"";display:table;border-collapse:collapse}.ui-helper-clearfix:after{clear:both}.ui-helper-zfix{width:100%;height:100%;top:0;left:0;position:absolute;opacity:0;filter:Alpha(Opacity=0)}.ui-front{z-index:100}.ui-state-disabled{cursor:default!important;pointer-events:none}.ui-icon{display:inline-block;vertical-align:middle;margin-top:-.25em;position:relative;text-indent:-99999px;overflow:hidden;background-repeat:no-repeat}.ui-widget-icon-block{left:50%;margin-left:-8px;display:block}.ui-widget-overlay{position:fixed;top:0;left:0;width:100%;height:100%}.ui-autocomplete{position:absolute;top:0;left:0;cursor:default}.ui-menu{list-style:none;padding:0;margin:0;display:block;outline:0}.ui-menu .ui-menu{position:absolute}.ui-menu .ui-menu-item{margin:0;cursor:pointer;list-style-image:url("data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7")}.ui-menu .ui-menu-item-wrapper{position:relative;padding:3px 1em 3px .4em}.ui-menu .ui-menu-divider{margin:5px 0;height:0;font-size:0;line-height:0;border-width:1px 0 0 0}.ui-menu .ui-state-focus,.ui-menu .ui-state-active{margin:-1px}.ui-menu-icons{position:relative}.ui-menu-icons .ui-menu-item-wrapper{padding-left:2em}.ui-menu .ui-icon{position:absolute;top:0;bottom:0;left:.2em;margin:auto 0}.ui-menu .ui-menu-icon{left:auto;right:0}.ui-widget{font-family:Verdana,Arial,sans-serif;font-size:1.1em}.ui-widget .ui-widget{font-size:1em}.ui-widget input,.ui-widget select,.ui-widget textarea,.ui-widget button{font-family:Verdana,Arial,sans-serif;font-size:1em}.ui-widget.ui-widget-content{border:1px solid #d3d3d3}.ui-widget-content{border:1px solid #aaa;background:#fff;color:#222}.ui-widget-content a{color:#222}.ui-widget-header{border:1px solid #aaa;background:#ccc url("images/ui-bg_highlight-soft_75_cccccc_1x100.png") 50% 50% repeat-x;color:#222;font-weight:bold}.ui-widget-header a{color:#222}.ui-state-default,.ui-widget-content .ui-state-default,.ui-widget-header .ui-state-default,.ui-button,html .ui-button.ui-state-disabled:hover,html .ui-button.ui-state-disabled:active{border:1px solid #d3d3d3;background:#e6e6e6 url("images/ui-bg_glass_75_e6e6e6_1x400.png") 50% 50% repeat-x;font-weight:normal;color:#555}.ui-state-default a,.ui-state-default a:link,.ui-state-default a:visited,a.ui-button,a:link.ui-button,a:visited.ui-button,.ui-button{color:#555;text-decoration:none}.ui-state-hover,.ui-widget-content .ui-state-hover,.ui-widget-header .ui-state-hover,.ui-state-focus,.ui-widget-content .ui-state-focus,.ui-widget-header .ui-state-focus,.ui-button:hover,.ui-button:focus{border:1px solid #999;background:#dadada url("images/ui-bg_glass_75_dadada_1x400.png") 50% 50% repeat-x;font-weight:normal;color:#212121}.ui-state-hover a,.ui-state-hover a:hover,.ui-state-hover a:link,.ui-state-hover a:visited,.ui-state-focus a,.ui-state-focus a:hover,.ui-state-focus a:link,.ui-state-focus a:visited,a.ui-button:hover,a.ui-button:focus{color:#212121;text-decoration:none}.ui-visual-focus{box-shadow:0 0 3px 1px rgb(94,158,214)}.ui-state-active,.ui-widget-content .ui-state-active,.ui-widget-header .ui-state-active,a.ui-button:active,.ui-button:active,.ui-button.ui-state-active:hover{border:1px solid 
#aaa;background:#dadada url("images/ui-bg_glass_65_dadada_1x400.png") 50% 50% repeat-x;font-weight:normal;color:#212121}.ui-icon-background,.ui-state-active .ui-icon-background{border:#aaa;background-color:#212121}.ui-state-active a,.ui-state-active a:link,.ui-state-active a:visited{color:#212121;text-decoration:none}.ui-state-highlight,.ui-widget-content .ui-state-highlight,.ui-widget-header .ui-state-highlight{border:1px solid #fcefa1;background:#fbf9ee url("images/ui-bg_glass_55_fbf9ee_1x400.png") 50% 50% repeat-x;color:#363636}.ui-state-checked{border:1px solid #fcefa1;background:#fbf9ee}.ui-state-highlight a,.ui-widget-content .ui-state-highlight a,.ui-widget-header .ui-state-highlight a{color:#363636}.ui-state-error,.ui-widget-content .ui-state-error,.ui-widget-header .ui-state-error{border:1px solid #cd0a0a;background:#fef1ec url("images/ui-bg_glass_95_fef1ec_1x400.png") 50% 50% repeat-x;color:#cd0a0a}.ui-state-error a,.ui-widget-content .ui-state-error a,.ui-widget-header .ui-state-error a{color:#cd0a0a}.ui-state-error-text,.ui-widget-content .ui-state-error-text,.ui-widget-header .ui-state-error-text{color:#cd0a0a}.ui-priority-primary,.ui-widget-content .ui-priority-primary,.ui-widget-header .ui-priority-primary{font-weight:bold}.ui-priority-secondary,.ui-widget-content .ui-priority-secondary,.ui-widget-header .ui-priority-secondary{opacity:.7;filter:Alpha(Opacity=70);font-weight:normal}.ui-state-disabled,.ui-widget-content .ui-state-disabled,.ui-widget-header .ui-state-disabled{opacity:.35;filter:Alpha(Opacity=35);background-image:none}.ui-state-disabled .ui-icon{filter:Alpha(Opacity=35)}.ui-icon{width:16px;height:16px}.ui-icon,.ui-widget-content .ui-icon{background-image:url("images/ui-icons_222222_256x240.png")}.ui-widget-header .ui-icon{background-image:url("images/ui-icons_222222_256x240.png")}.ui-state-hover .ui-icon,.ui-state-focus .ui-icon,.ui-button:hover .ui-icon,.ui-button:focus .ui-icon{background-image:url("images/ui-icons_454545_256x240.png")}.ui-state-active .ui-icon,.ui-button:active .ui-icon{background-image:url("images/ui-icons_454545_256x240.png")}.ui-state-highlight .ui-icon,.ui-button .ui-state-highlight.ui-icon{background-image:url("images/ui-icons_2e83ff_256x240.png")}.ui-state-error .ui-icon,.ui-state-error-text .ui-icon{background-image:url("images/ui-icons_cd0a0a_256x240.png")}.ui-button .ui-icon{background-image:url("images/ui-icons_888888_256x240.png")}.ui-icon-blank{background-position:16px 16px}.ui-icon-caret-1-n{background-position:0 0}.ui-icon-caret-1-ne{background-position:-16px 0}.ui-icon-caret-1-e{background-position:-32px 0}.ui-icon-caret-1-se{background-position:-48px 0}.ui-icon-caret-1-s{background-position:-65px 0}.ui-icon-caret-1-sw{background-position:-80px 0}.ui-icon-caret-1-w{background-position:-96px 0}.ui-icon-caret-1-nw{background-position:-112px 0}.ui-icon-caret-2-n-s{background-position:-128px 0}.ui-icon-caret-2-e-w{background-position:-144px 0}.ui-icon-triangle-1-n{background-position:0 -16px}.ui-icon-triangle-1-ne{background-position:-16px -16px}.ui-icon-triangle-1-e{background-position:-32px -16px}.ui-icon-triangle-1-se{background-position:-48px -16px}.ui-icon-triangle-1-s{background-position:-65px -16px}.ui-icon-triangle-1-sw{background-position:-80px -16px}.ui-icon-triangle-1-w{background-position:-96px -16px}.ui-icon-triangle-1-nw{background-position:-112px -16px}.ui-icon-triangle-2-n-s{background-position:-128px -16px}.ui-icon-triangle-2-e-w{background-position:-144px -16px}.ui-icon-arrow-1-n{background-position:0 
-32px}.ui-icon-arrow-1-ne{background-position:-16px -32px}.ui-icon-arrow-1-e{background-position:-32px -32px}.ui-icon-arrow-1-se{background-position:-48px -32px}.ui-icon-arrow-1-s{background-position:-65px -32px}.ui-icon-arrow-1-sw{background-position:-80px -32px}.ui-icon-arrow-1-w{background-position:-96px -32px}.ui-icon-arrow-1-nw{background-position:-112px -32px}.ui-icon-arrow-2-n-s{background-position:-128px -32px}.ui-icon-arrow-2-ne-sw{background-position:-144px -32px}.ui-icon-arrow-2-e-w{background-position:-160px -32px}.ui-icon-arrow-2-se-nw{background-position:-176px -32px}.ui-icon-arrowstop-1-n{background-position:-192px -32px}.ui-icon-arrowstop-1-e{background-position:-208px -32px}.ui-icon-arrowstop-1-s{background-position:-224px -32px}.ui-icon-arrowstop-1-w{background-position:-240px -32px}.ui-icon-arrowthick-1-n{background-position:1px -48px}.ui-icon-arrowthick-1-ne{background-position:-16px -48px}.ui-icon-arrowthick-1-e{background-position:-32px -48px}.ui-icon-arrowthick-1-se{background-position:-48px -48px}.ui-icon-arrowthick-1-s{background-position:-64px -48px}.ui-icon-arrowthick-1-sw{background-position:-80px -48px}.ui-icon-arrowthick-1-w{background-position:-96px -48px}.ui-icon-arrowthick-1-nw{background-position:-112px -48px}.ui-icon-arrowthick-2-n-s{background-position:-128px -48px}.ui-icon-arrowthick-2-ne-sw{background-position:-144px -48px}.ui-icon-arrowthick-2-e-w{background-position:-160px -48px}.ui-icon-arrowthick-2-se-nw{background-position:-176px -48px}.ui-icon-arrowthickstop-1-n{background-position:-192px -48px}.ui-icon-arrowthickstop-1-e{background-position:-208px -48px}.ui-icon-arrowthickstop-1-s{background-position:-224px -48px}.ui-icon-arrowthickstop-1-w{background-position:-240px -48px}.ui-icon-arrowreturnthick-1-w{background-position:0 -64px}.ui-icon-arrowreturnthick-1-n{background-position:-16px -64px}.ui-icon-arrowreturnthick-1-e{background-position:-32px -64px}.ui-icon-arrowreturnthick-1-s{background-position:-48px -64px}.ui-icon-arrowreturn-1-w{background-position:-64px -64px}.ui-icon-arrowreturn-1-n{background-position:-80px -64px}.ui-icon-arrowreturn-1-e{background-position:-96px -64px}.ui-icon-arrowreturn-1-s{background-position:-112px -64px}.ui-icon-arrowrefresh-1-w{background-position:-128px -64px}.ui-icon-arrowrefresh-1-n{background-position:-144px -64px}.ui-icon-arrowrefresh-1-e{background-position:-160px -64px}.ui-icon-arrowrefresh-1-s{background-position:-176px -64px}.ui-icon-arrow-4{background-position:0 -80px}.ui-icon-arrow-4-diag{background-position:-16px -80px}.ui-icon-extlink{background-position:-32px -80px}.ui-icon-newwin{background-position:-48px -80px}.ui-icon-refresh{background-position:-64px -80px}.ui-icon-shuffle{background-position:-80px -80px}.ui-icon-transfer-e-w{background-position:-96px -80px}.ui-icon-transferthick-e-w{background-position:-112px -80px}.ui-icon-folder-collapsed{background-position:0 -96px}.ui-icon-folder-open{background-position:-16px -96px}.ui-icon-document{background-position:-32px -96px}.ui-icon-document-b{background-position:-48px -96px}.ui-icon-note{background-position:-64px -96px}.ui-icon-mail-closed{background-position:-80px -96px}.ui-icon-mail-open{background-position:-96px -96px}.ui-icon-suitcase{background-position:-112px -96px}.ui-icon-comment{background-position:-128px -96px}.ui-icon-person{background-position:-144px -96px}.ui-icon-print{background-position:-160px -96px}.ui-icon-trash{background-position:-176px -96px}.ui-icon-locked{background-position:-192px 
-96px}.ui-icon-unlocked{background-position:-208px -96px}.ui-icon-bookmark{background-position:-224px -96px}.ui-icon-tag{background-position:-240px -96px}.ui-icon-home{background-position:0 -112px}.ui-icon-flag{background-position:-16px -112px}.ui-icon-calendar{background-position:-32px -112px}.ui-icon-cart{background-position:-48px -112px}.ui-icon-pencil{background-position:-64px -112px}.ui-icon-clock{background-position:-80px -112px}.ui-icon-disk{background-position:-96px -112px}.ui-icon-calculator{background-position:-112px -112px}.ui-icon-zoomin{background-position:-128px -112px}.ui-icon-zoomout{background-position:-144px -112px}.ui-icon-search{background-position:-160px -112px}.ui-icon-wrench{background-position:-176px -112px}.ui-icon-gear{background-position:-192px -112px}.ui-icon-heart{background-position:-208px -112px}.ui-icon-star{background-position:-224px -112px}.ui-icon-link{background-position:-240px -112px}.ui-icon-cancel{background-position:0 -128px}.ui-icon-plus{background-position:-16px -128px}.ui-icon-plusthick{background-position:-32px -128px}.ui-icon-minus{background-position:-48px -128px}.ui-icon-minusthick{background-position:-64px -128px}.ui-icon-close{background-position:-80px -128px}.ui-icon-closethick{background-position:-96px -128px}.ui-icon-key{background-position:-112px -128px}.ui-icon-lightbulb{background-position:-128px -128px}.ui-icon-scissors{background-position:-144px -128px}.ui-icon-clipboard{background-position:-160px -128px}.ui-icon-copy{background-position:-176px -128px}.ui-icon-contact{background-position:-192px -128px}.ui-icon-image{background-position:-208px -128px}.ui-icon-video{background-position:-224px -128px}.ui-icon-script{background-position:-240px -128px}.ui-icon-alert{background-position:0 -144px}.ui-icon-info{background-position:-16px -144px}.ui-icon-notice{background-position:-32px -144px}.ui-icon-help{background-position:-48px -144px}.ui-icon-check{background-position:-64px -144px}.ui-icon-bullet{background-position:-80px -144px}.ui-icon-radio-on{background-position:-96px -144px}.ui-icon-radio-off{background-position:-112px -144px}.ui-icon-pin-w{background-position:-128px -144px}.ui-icon-pin-s{background-position:-144px -144px}.ui-icon-play{background-position:0 -160px}.ui-icon-pause{background-position:-16px -160px}.ui-icon-seek-next{background-position:-32px -160px}.ui-icon-seek-prev{background-position:-48px -160px}.ui-icon-seek-end{background-position:-64px -160px}.ui-icon-seek-start{background-position:-80px -160px}.ui-icon-seek-first{background-position:-80px -160px}.ui-icon-stop{background-position:-96px -160px}.ui-icon-eject{background-position:-112px -160px}.ui-icon-volume-off{background-position:-128px -160px}.ui-icon-volume-on{background-position:-144px -160px}.ui-icon-power{background-position:0 -176px}.ui-icon-signal-diag{background-position:-16px -176px}.ui-icon-signal{background-position:-32px -176px}.ui-icon-battery-0{background-position:-48px -176px}.ui-icon-battery-1{background-position:-64px -176px}.ui-icon-battery-2{background-position:-80px -176px}.ui-icon-battery-3{background-position:-96px -176px}.ui-icon-circle-plus{background-position:0 -192px}.ui-icon-circle-minus{background-position:-16px -192px}.ui-icon-circle-close{background-position:-32px -192px}.ui-icon-circle-triangle-e{background-position:-48px -192px}.ui-icon-circle-triangle-s{background-position:-64px -192px}.ui-icon-circle-triangle-w{background-position:-80px -192px}.ui-icon-circle-triangle-n{background-position:-96px 
-192px}.ui-icon-circle-arrow-e{background-position:-112px -192px}.ui-icon-circle-arrow-s{background-position:-128px -192px}.ui-icon-circle-arrow-w{background-position:-144px -192px}.ui-icon-circle-arrow-n{background-position:-160px -192px}.ui-icon-circle-zoomin{background-position:-176px -192px}.ui-icon-circle-zoomout{background-position:-192px -192px}.ui-icon-circle-check{background-position:-208px -192px}.ui-icon-circlesmall-plus{background-position:0 -208px}.ui-icon-circlesmall-minus{background-position:-16px -208px}.ui-icon-circlesmall-close{background-position:-32px -208px}.ui-icon-squaresmall-plus{background-position:-48px -208px}.ui-icon-squaresmall-minus{background-position:-64px -208px}.ui-icon-squaresmall-close{background-position:-80px -208px}.ui-icon-grip-dotted-vertical{background-position:0 -224px}.ui-icon-grip-dotted-horizontal{background-position:-16px -224px}.ui-icon-grip-solid-vertical{background-position:-32px -224px}.ui-icon-grip-solid-horizontal{background-position:-48px -224px}.ui-icon-gripsmall-diagonal-se{background-position:-64px -224px}.ui-icon-grip-diagonal-se{background-position:-80px -224px}.ui-corner-all,.ui-corner-top,.ui-corner-left,.ui-corner-tl{border-top-left-radius:4px}.ui-corner-all,.ui-corner-top,.ui-corner-right,.ui-corner-tr{border-top-right-radius:4px}.ui-corner-all,.ui-corner-bottom,.ui-corner-left,.ui-corner-bl{border-bottom-left-radius:4px}.ui-corner-all,.ui-corner-bottom,.ui-corner-right,.ui-corner-br{border-bottom-right-radius:4px}.ui-widget-overlay{background:#aaa;opacity:.3;filter:Alpha(Opacity=30)}.ui-widget-shadow{box-shadow:-8px -8px 8px #aaa} \ No newline at end of file diff --git a/mobile/android/index.html b/mobile/android/index.html new file mode 100644 index 000000000000..007d2c17a8a3 --- /dev/null +++ b/mobile/android/index.html @@ -0,0 +1,749 @@ + + + + + + + + + + + + + Android | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

            PyTorch Mobile

            + +

            End-to-end workflow from Training to Deployment for iOS and Android mobile devices


              Note

              +

              PyTorch Mobile is no longer actively supported. Please check out ExecuTorch, PyTorch’s all-new on-device inference library. You can also review this page to learn more about how to use ExecuTorch to build an Android app.

              +
              + +

              Android

              + +

              Quickstart with a HelloWorld Example

              + +

HelloWorld is a simple image classification application that demonstrates how to use the PyTorch Android API. The application runs a TorchScript-serialized, TorchVision-pretrained image classification model (MobileNetV2 in the walkthrough below) on a static image, which is packaged inside the app as an Android asset.

              + +

              1. Model Preparation

              + +

Let’s start with model preparation. If you are familiar with PyTorch, you probably already know how to train and save your model. If you don’t, we are going to use a pre-trained image classification model (MobileNetV2) from torchvision. To install torchvision, run the command below:

              +
              pip install torchvision
              +
              + +

To serialize the model, you can use the Python script in the root folder of the HelloWorld app:

              +
              import torch
              +import torchvision
              +from torch.utils.mobile_optimizer import optimize_for_mobile
              +
              +model = torchvision.models.mobilenet_v2(pretrained=True)
              +model.eval()
              +example = torch.rand(1, 3, 224, 224)
              +traced_script_module = torch.jit.trace(model, example)
              +traced_script_module_optimized = optimize_for_mobile(traced_script_module)
              +traced_script_module_optimized._save_for_lite_interpreter("app/src/main/assets/model.ptl")
              +
              +
              +

If everything works well, we should have our model, model.ptl, generated in the assets folder of the Android application. It will be packaged inside the Android application as an asset and can be used on the device.

              + +

You can find more details about TorchScript in the tutorials on pytorch.org.

              + +

2. Cloning from GitHub

              +
              git clone https://github.com/pytorch/android-demo-app.git
+cd android-demo-app/HelloWorldApp
              +
              +

If the Android SDK and Android NDK are already installed, you can install this application on a connected Android device or emulator with:

              +
              ./gradlew installDebug
              +
              + +

We recommend opening this project in Android Studio 3.5.1 or later. At the moment, PyTorch Android and the demo applications use Android Gradle plugin version 3.5.0, which is supported only by Android Studio 3.5.1 and higher. Using Android Studio, you can install the Android NDK and Android SDK from the Android Studio UI.

              + +

              3. Gradle dependencies

              + +

PyTorch Android is added to HelloWorld as Gradle dependencies in build.gradle:

              + +
              repositories {
              +    jcenter()
              +}
              +
              +dependencies {
              +    implementation 'org.pytorch:pytorch_android_lite:1.9.0'
              +    implementation 'org.pytorch:pytorch_android_torchvision:1.9.0'
              +}
              +
              +

Here, org.pytorch:pytorch_android is the main dependency with the PyTorch Android API, including the libtorch native library for all four Android ABIs (armeabi-v7a, arm64-v8a, x86, x86_64). Later in this doc you can find how to rebuild it for a specific list of Android ABIs only.

              + +

org.pytorch:pytorch_android_torchvision is an additional library with utility functions for converting android.media.Image and android.graphics.Bitmap objects to tensors.

              + +

              4. Reading image from Android Asset

              + +

All the logic happens in org.pytorch.helloworld.MainActivity. As a first step, we read image.jpg into an android.graphics.Bitmap using the standard Android API.

              +
              Bitmap bitmap = BitmapFactory.decodeStream(getAssets().open("image.jpg"));
              +
              + +

              5. Loading Mobile Module

              +
              Module module = Module.load(assetFilePath(this, "model.ptl"));
              +
              +

org.pytorch.Module represents a torch::jit::mobile::Module that can be loaded with the load method, which takes the file path of the serialized model.
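Module.load expects a path on the file system, so the HelloWorld app first copies the bundled asset out of the APK into the app's private files directory. The helper below is a minimal sketch of that idea, not the app's exact code; the name assetFilePath and the error handling are assumptions.

// Sketch of an asset-copying helper (assumed name and behavior, not the app's exact code).
// Requires: android.content.Context, java.io.File, java.io.FileOutputStream,
//           java.io.IOException, java.io.InputStream, java.io.OutputStream.
public static String assetFilePath(Context context, String assetName) throws IOException {
  File file = new File(context.getFilesDir(), assetName);
  if (file.exists() && file.length() > 0) {
    return file.getAbsolutePath(); // already copied on a previous run
  }
  try (InputStream is = context.getAssets().open(assetName);
       OutputStream os = new FileOutputStream(file)) {
    byte[] buffer = new byte[4 * 1024];
    int read;
    while ((read = is.read(buffer)) != -1) {
      os.write(buffer, 0, read);
    }
    os.flush();
  }
  return file.getAbsolutePath();
}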

              + +

              6. Preparing Input

              +
              Tensor inputTensor = TensorImageUtils.bitmapToFloat32Tensor(bitmap,
              +    TensorImageUtils.TORCHVISION_NORM_MEAN_RGB, TensorImageUtils.TORCHVISION_NORM_STD_RGB);
              +
              +

org.pytorch.torchvision.TensorImageUtils is part of the org.pytorch:pytorch_android_torchvision library. The TensorImageUtils#bitmapToFloat32Tensor method creates a tensor in the torchvision format using an android.graphics.Bitmap as the source.

              + +
              +

All pre-trained models expect input images normalized in the same way, i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), where H and W are expected to be at least 224. The images have to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].
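Concretely, each channel value p in [0, 255] is mapped to (p / 255 - mean[c]) / std[c]. The snippet below is only an illustration of that arithmetic for a single red-channel value; TensorImageUtils performs it for every pixel when it builds the tensor.

// Illustration of torchvision-style normalization for one red-channel byte.
public class NormalizeExample {
  public static void main(String[] args) {
    float[] mean = {0.485f, 0.456f, 0.406f};
    float[] std = {0.229f, 0.224f, 0.225f};
    int rawRed = 128;                               // raw 8-bit channel value
    float scaled = rawRed / 255.0f;                 // load into [0, 1]
    float normalized = (scaled - mean[0]) / std[0]; // normalize the red channel
    System.out.println(normalized);                 // prints roughly 0.074
  }
}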

              +
              + +

inputTensor’s shape is 1x3xHxW, where H and W are the bitmap height and width, respectively.

              + +

              7. Run Inference

              + +
              Tensor outputTensor = module.forward(IValue.from(inputTensor)).toTensor();
              +float[] scores = outputTensor.getDataAsFloatArray();
              +
              + +

The org.pytorch.Module.forward method runs the loaded module’s forward method and returns the result as an org.pytorch.Tensor outputTensor with shape 1x1000.

              + +

              8. Processing results

              +

Its content is retrieved using the org.pytorch.Tensor.getDataAsFloatArray() method, which returns a Java array of floats with scores for every ImageNet class.

              + +

After that, we simply find the index with the maximum score and retrieve the predicted class name from the ImageNetClasses.IMAGENET_CLASSES array, which contains all ImageNet classes.

              + +
              float maxScore = -Float.MAX_VALUE;
              +int maxScoreIdx = -1;
              +for (int i = 0; i < scores.length; i++) {
              +  if (scores[i] > maxScore) {
              +    maxScore = scores[i];
              +    maxScoreIdx = i;
              +  }
              +}
              +String className = ImageNetClasses.IMAGENET_CLASSES[maxScoreIdx];
              +
              + +

In the following sections you can find detailed explanations of the PyTorch Android API, a code walkthrough of a bigger demo application, implementation details of the API, and how to customize and build it from source.

              + +

              PyTorch Demo Application

              + +

We have also created another, more complex PyTorch Android demo application in the same GitHub repo that does image classification from the camera output and text classification.

              + +

To get the device camera output it uses the Android CameraX API. All the logic that works with CameraX is separated into the org.pytorch.demo.vision.AbstractCameraXActivity class.

              + +
              void setupCameraX() {
              +    final PreviewConfig previewConfig = new PreviewConfig.Builder().build();
              +    final Preview preview = new Preview(previewConfig);
              +    preview.setOnPreviewOutputUpdateListener(output -> mTextureView.setSurfaceTexture(output.getSurfaceTexture()));
              +
              +    final ImageAnalysisConfig imageAnalysisConfig =
              +        new ImageAnalysisConfig.Builder()
              +            .setTargetResolution(new Size(224, 224))
              +            .setCallbackHandler(mBackgroundHandler)
              +            .setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE)
              +            .build();
              +    final ImageAnalysis imageAnalysis = new ImageAnalysis(imageAnalysisConfig);
              +    imageAnalysis.setAnalyzer(
              +        (image, rotationDegrees) -> {
              +          analyzeImage(image, rotationDegrees);
              +        });
              +
              +    CameraX.bindToLifecycle(this, preview, imageAnalysis);
              +  }
              +
+  void analyzeImage(android.media.Image image, int rotationDegrees)
              +
              + +

The analyzeImage method processes the camera output, an android.media.Image.

              + +

It uses the TensorImageUtils.imageYUV420CenterCropToFloat32Tensor method to convert an android.media.Image in YUV420 format to an input tensor.
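A sketch of how that call might look inside analyzeImage; the 224x224 target size and the exact parameter order are assumptions here, so check the pytorch_android_torchvision Javadoc for the authoritative signature.

// Sketch only: convert the YUV420 camera image to a normalized float tensor.
Tensor inputTensor = TensorImageUtils.imageYUV420CenterCropToFloat32Tensor(
    image,                                       // android.media.Image from CameraX
    rotationDegrees,                             // rotation reported by the analyzer callback
    224, 224,                                    // center-crop and scale to the model input size (assumed)
    TensorImageUtils.TORCHVISION_NORM_MEAN_RGB,
    TensorImageUtils.TORCHVISION_NORM_STD_RGB);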

              + +

After getting the predicted scores from the model, it finds the top K classes with the highest scores and shows them in the UI.
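A minimal sketch of that top-K step; K, the variable names, and the class-name lookup are illustrative rather than taken from the demo app.

// Pick the K highest-scoring class indices from the model output (illustrative sketch).
final int k = 3;
final float[] scores = outputTensor.getDataAsFloatArray();
Integer[] indices = new Integer[scores.length];
for (int i = 0; i < scores.length; i++) {
  indices[i] = i;
}
// Sort indices by descending score and keep the first K for display.
java.util.Arrays.sort(indices, (a, b) -> Float.compare(scores[b], scores[a]));
for (int i = 0; i < k; i++) {
  int classIdx = indices[i];
  // e.g. map classIdx to a human-readable label and show it in the UI
}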

              + +

              Language Processing Example

              + +

Another example is natural language processing, based on an LSTM model trained on a Reddit comments dataset. The logic happens in TextClassificationActivity.

              + +

The result class names are packaged inside the TorchScript model and initialized just after module initialization. The module has a get_classes method that returns List[str], which can be called using Module.runMethod(methodName):

              +
                  mModule = Module.load(moduleFileAbsoluteFilePath);
              +    IValue getClassesOutput = mModule.runMethod("get_classes");
              +
              +

The returned IValue can be converted to a Java array of IValue using IValue.toList() and processed into an array of strings using IValue.toStr():

              +
                  IValue[] classesListIValue = getClassesOutput.toList();
              +    String[] moduleClasses = new String[classesListIValue.length];
              +    int i = 0;
              +    for (IValue iv : classesListIValue) {
              +      moduleClasses[i++] = iv.toStr();
              +    }
              +
              + +

The entered text is converted to a Java array of bytes with UTF-8 encoding. Tensor.fromBlobUnsigned creates a tensor of dtype=uint8 from that array of bytes.

              +
                  byte[] bytes = text.getBytes(Charset.forName("UTF-8"));
              +    final long[] shape = new long[]{1, bytes.length};
              +    final Tensor inputTensor = Tensor.fromBlobUnsigned(bytes, shape);
              +
              + +

Running inference on the model is similar to the previous examples:

              +
Tensor outputTensor = mModule.forward(IValue.from(inputTensor)).toTensor();
              +
              + +

              After that, the code processes the output, finding classes with the highest scores.

              + +

              More PyTorch Android Demo Apps

              + +

D2Go

              + +

D2Go demonstrates a Python script that creates the much lighter and faster Facebook D2Go model, powered by PyTorch 1.8, torchvision 0.9, and Detectron2 with built-in SOTA networks for mobile, and an Android app that uses it to detect objects in pictures from your photo library, taken with the camera, or from the live camera feed. This demo app also shows how to use the native pre-built torchvision-ops library.

              + +

              Image Segmentation

              + +

              Image Segmentation demonstrates a Python script that converts the PyTorch DeepLabV3 model and an Android app that uses the model to segment images.

              + +

              Object Detection

              + +

Object Detection demonstrates how to convert the popular YOLOv5 model and use it in an Android app that detects objects in pictures from your photo library, taken with the camera, or from the live camera feed.

              + +

              Neural Machine Translation

              + +

              Neural Machine Translation demonstrates how to convert a sequence-to-sequence neural machine translation model trained with the code in the PyTorch NMT tutorial and use the model in an Android app to do French-English translation.

              + +

              Question Answering

              + +

              Question Answering demonstrates how to convert a powerful transformer QA model and use the model in an Android app to answer questions about PyTorch Mobile and more.

              + +

              Vision Transformer

              + +

Vision Transformer demonstrates how to use Facebook’s latest Vision Transformer DeiT model to do image classification, and how to convert another Vision Transformer model and use it in an Android app to perform handwritten digit recognition.

              + +

Speech Recognition

              + +

              Speech Recognition demonstrates how to convert Facebook AI’s wav2vec 2.0, one of the leading models in speech recognition, to TorchScript and how to use the scripted model in an Android app to perform speech recognition.

              + +

              Video Classification

              + +

TorchVideo demonstrates how to use a pre-trained video classification model from the newly released PyTorchVideo library on Android to see video classification results, updated every second while the video plays, on test videos, videos from the Photos library, or even real-time video.

              + +

              PyTorch Android Tutorial and Recipes

              + +

              Image Segmentation DeepLabV3 on Android

              + +

              A comprehensive step-by-step tutorial on how to prepare and run the PyTorch DeepLabV3 image segmentation model on Android.

              + +

              PyTorch Mobile Performance Recipes

              + +

A list of recipes for performance optimization when using PyTorch on mobile.

              + +

              Making Android Native Application That Uses PyTorch Android Prebuilt Libraries

              + +

Learn how to make an Android application from scratch that uses the LibTorch C++ API and a TorchScript model with a custom C++ operator.

              + +

              Fuse Modules recipe

              + +

              Learn how to fuse a list of PyTorch modules into a single module to reduce the model size before quantization.

              + +

              Quantization for Mobile Recipe

              + +

Learn how to reduce the model size and make it run faster without losing much accuracy.

              + +

              Script and Optimize for Mobile

              + +

Learn how to convert the model to TorchScript and (optionally) optimize it for mobile apps.

              + +

              Model Preparation for Android Recipe

              + +

Learn how to add the model to an Android project and use the PyTorch library for Android.

              + +

              Building PyTorch Android from Source

              + +

In some cases you might want to use a local build of PyTorch Android, for example to build a custom LibTorch binary with a different set of operators, to make local changes, or to try out the latest PyTorch code.

              + +

For this you can use the ./scripts/build_pytorch_android.sh script.

              +
              git clone https://github.com/pytorch/pytorch.git
              +cd pytorch
              +sh ./scripts/build_pytorch_android.sh
              +
              + +

              The workflow contains several steps:

              + +

1. Build libtorch for Android for all four Android ABIs (armeabi-v7a, arm64-v8a, x86, x86_64).

              + +

2. Create symbolic links to the results of those builds: android/pytorch_android/src/main/jniLibs/${abi} to the directory with the output libraries, and android/pytorch_android/src/main/cpp/libtorch_include/${abi} to the directory with the headers. These directories are used to build the libpytorch_jni.so library, part of the pytorch_android-release.aar bundle, that will be loaded on the Android device.

              + +

3. Finally, run Gradle in the android/pytorch_android directory with the assembleRelease task.

              + +

The script requires that the Android SDK, Android NDK, Java JDK, and Gradle are installed. They are specified via environment variables:

              + +

              ANDROID_HOME - path to Android SDK

              + +

              ANDROID_NDK - path to Android NDK. It’s recommended to use NDK 21.x.

              + +

              GRADLE_HOME - path to gradle

              + +

              JAVA_HOME - path to JAVA JDK

              + +

After a successful build, you should see the resulting aar files:

              + +
$ find android -type f -name "*aar"
              +android/pytorch_android/build/outputs/aar/pytorch_android-release.aar
              +android/pytorch_android_torchvision/build/outputs/aar/pytorch_android_torchvision-release.aar
              +
              + +

              Using the PyTorch Android Libraries Built from Source or Nightly

              + +

First, add the two aar files built above (or downloaded from the nightly-built PyTorch Android repos here and here) to the Android project’s libs folder, then add the following to the project’s app build.gradle file:

              +
              allprojects {
              +    repositories {
              +        flatDir {
              +            dirs 'libs'
              +        }
              +    }
              +}
              +
              +dependencies {
              +
              +    // if using the libraries built from source
              +    implementation(name:'pytorch_android-release', ext:'aar')
              +    implementation(name:'pytorch_android_torchvision-release', ext:'aar')
              +
              +    // if using the nightly built libraries downloaded above, for example the 1.8.0-snapshot on Jan. 21, 2021
              +    // implementation(name:'pytorch_android-1.8.0-20210121.092759-172', ext:'aar')
              +    // implementation(name:'pytorch_android_torchvision-1.8.0-20210121.092817-173', ext:'aar')
              +
              +    ...
              +    implementation 'com.android.support:appcompat-v7:28.0.0'
              +    implementation 'com.facebook.fbjni:fbjni-java-only:0.0.3'
              +}
              +
              + +

We also have to add all transitive dependencies of our aars. As pytorch_android depends on com.android.support:appcompat-v7:28.0.0 or androidx.appcompat:appcompat:1.2.0, we need to add one of them. (When using Maven dependencies, they are added automatically from pom.xml.)

              + +

              Using the Nightly PyTorch Android Libraries

              + +

Instead of using the aar files built from source or downloaded from the links in the previous section, you can also use the nightly-built Android PyTorch and TorchVision libraries by adding the Maven URL and the nightly library dependencies to your app’s build.gradle file as follows:

              + +
              repositories {
              +    maven {
              +        url "https://oss.sonatype.org/content/repositories/snapshots"
              +    }
              +}
              +
              +dependencies {
              +    ...
              +    implementation 'org.pytorch:pytorch_android:1.8.0-SNAPSHOT'
              +    implementation 'org.pytorch:pytorch_android_torchvision:1.8.0-SNAPSHOT'
              +}
              +
              + +

This is the easiest way to try out the latest PyTorch code and the Android libraries if you do not need to make any local changes. But be aware that you may need to build the model used on mobile with the latest PyTorch - using either the latest PyTorch code or a quick nightly install with a command like pip install --pre torch torchvision -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html - to avoid possible model version mismatch errors when running the model on mobile.

              + +

              Custom Build

              + +

To reduce the size of the binaries, you can do a custom build of PyTorch Android with only the set of operators required by your model. This involves two steps: preparing the list of operators from your model, and rebuilding PyTorch Android with that list.

              + +

              1. Verify your PyTorch version is 1.4.0 or above. You can do that by checking the value of torch.__version__.

              + +

2. Prepare the list of operators.

              + +

The list of operators in your serialized TorchScript model can be prepared in YAML format using the Python API function torch.jit.export_opnames(). To dump the operators in your model, say MobileNetV2, run the following lines of Python code:

              +
              # Dump list of operators used by MobileNetV2:
              +import torch, yaml
              +model = torch.jit.load('MobileNetV2.pt')
              +ops = torch.jit.export_opnames(model)
              +with open('MobileNetV2.yaml', 'w') as output:
              +    yaml.dump(ops, output)
              +
              +

3. Build PyTorch Android with the prepared operators list.

              + +

To build PyTorch Android with the prepared YAML list of operators, specify it in the SELECTED_OP_LIST environment variable. Also, in the arguments, specify which Android ABIs it should build for; by default it builds all four Android ABIs.

              + +
              # Build PyTorch Android library customized for MobileNetV2:
              +SELECTED_OP_LIST=MobileNetV2.yaml scripts/build_pytorch_android.sh arm64-v8a
              +
              + +

After a successful build you can integrate the resulting aar files into your Android Gradle project, following the steps from the previous section of this tutorial (Building PyTorch Android from Source).

              + +

              Use PyTorch JIT interpreter

              + +

The PyTorch JIT interpreter is the default interpreter before 1.9 (a version of our PyTorch interpreter that is not as size-efficient). It is still supported in 1.9, and can be used via build.gradle:

              +
              repositories {
              +    jcenter()
              +}
              +
              +dependencies {
              +    implementation 'org.pytorch:pytorch_android:1.9.0'
              +    implementation 'org.pytorch:pytorch_android_torchvision:1.9.0'
              +}
              +
              + +

              Android Tutorials

              + +

              Watch the following video as PyTorch Partner Engineer Brad Heintz walks through steps for setting up the PyTorch Runtime for Android projects:

              + +

              PyTorch Mobile Runtime for Android

              + +

              The corresponding code can be found here.

              + +

Check out our Mobile Performance Recipes, which cover how to optimize your model and check, via benchmarking, whether the optimizations helped.

              + +

In addition, follow this recipe to learn how to build a native Android application that uses the PyTorch prebuilt libraries.

              + +

              API Docs

              + +

              You can find more details about the PyTorch Android API in the Javadoc.

              + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/mobile/home/index.html b/mobile/home/index.html new file mode 100644 index 000000000000..6678c6fd4efe --- /dev/null +++ b/mobile/home/index.html @@ -0,0 +1,425 @@ + + + + + + + + + + + + + Home | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

              PyTorch Mobile

              + +

              End-to-end workflow from Training to Deployment for iOS and Android mobile devices


                Note

                +

                PyTorch Mobile is no longer actively supported. Please check out ExecuTorch, PyTorch’s all-new on-device inference library.

                +
                + +

                PyTorch Mobile

                + +

                There is a growing need to execute ML models on edge devices to reduce latency, preserve privacy, and enable new interactive use cases.

                + +

The PyTorch Mobile runtime beta release allows you to seamlessly go from training a model to deploying it, while staying entirely within the PyTorch ecosystem. It provides an end-to-end workflow that simplifies moving from research to production for mobile devices. In addition, it paves the way for privacy-preserving features via federated learning techniques.

                + +

PyTorch Mobile is currently in beta and is already in wide-scale production use. It will become a stable release once the APIs are locked down.

                + +

                Key features

                +
                  +
                • Available for iOS, Android and Linux
                • +
                • Provides APIs that cover common preprocessing and integration tasks needed for incorporating ML in mobile applications
                • +
                • Support for tracing and scripting via TorchScript IR
                • +
                • Support for XNNPACK floating point kernel libraries for Arm CPUs
                • +
• Integration of QNNPACK for 8-bit quantized kernels, including support for per-channel quantization, dynamic quantization and more (see the example after this list)
                • +
                • Provides an efficient mobile interpreter in Android and iOS. Also supports build level optimization and selective compilation depending on the operators needed for user applications (i.e., the final binary size of the app is determined by the actual operators the app needs).
                • +
                • Streamline model optimization via optimize_for_mobile
                • +
                • Support for hardware backends like GPU, DSP, and NPU will be available soon in Beta
                • +
                + +
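As a concrete illustration of the quantization support mentioned above, here is a minimal sketch (not taken from any official tutorial) that dynamically quantizes the Linear layers of a toy model to 8-bit, the path served by QNNPACK on mobile CPUs:

import torch

# A toy model; any model with Linear layers works the same way.
model = torch.nn.Sequential(
    torch.nn.Linear(128, 64),
    torch.nn.ReLU(),
    torch.nn.Linear(64, 10),
).eval()

# Replace the Linear layers with dynamically quantized (int8) versions.
quantized_model = torch.quantization.quantize_dynamic(
    model, {torch.nn.Linear}, dtype=torch.qint8
)
print(quantized_model)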

                Prototypes

                +

                We have launched the following features in prototype, available in the PyTorch nightly releases, and would love to get your feedback on the PyTorch forums:

                + + + +

                Deployment workflow

                + +

                A typical workflow from training to mobile deployment with the optional model optimization steps is outlined in the following figure.

                +
                + +
                + +
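In code, the same workflow looks roughly like this - a sketch assuming torchvision's MobileNet v2 stands in for your trained model; the optimization step is optional, as in the figure:

import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile

# 1. Start from a trained model (here a pre-trained MobileNet v2).
model = torchvision.models.mobilenet_v2(pretrained=True).eval()

# 2. Convert to TorchScript via tracing (scripting also works).
example_input = torch.rand(1, 3, 224, 224)
scripted_model = torch.jit.trace(model, example_input)

# 3. Optional optimization pass for the mobile runtime.
optimized_model = optimize_for_mobile(scripted_model)

# 4. Save in the format the mobile interpreter loads.
optimized_model._save_for_lite_interpreter("mobilenet_v2.ptl")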

                Examples to get you started

                + + + +

                Demo apps

                + +

Our new demo apps also include examples of image segmentation, object detection, neural machine translation, question answering, and vision transformers. They are available on both iOS and Android:

                + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/mobile/index.html b/mobile/index.html new file mode 100644 index 000000000000..c4f7e74af638 --- /dev/null +++ b/mobile/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

                Redirecting…

                + Click here if you are not redirected. + diff --git a/mobile/ios/index.html b/mobile/ios/index.html new file mode 100644 index 000000000000..3c97b05c5ee3 --- /dev/null +++ b/mobile/ios/index.html @@ -0,0 +1,657 @@ + + + + + + + + + + + + + iOS | PyTorch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

                PyTorch Mobile

                + +

                End-to-end workflow from Training to Deployment for iOS and Android mobile devices


                  Note

                  +

                  PyTorch Mobile is no longer actively supported. Please check out ExecuTorch, PyTorch’s all-new on-device inference library. You can also review this page to learn more about how to use ExecuTorch to build an iOS app.

                  +
                  + +

                  iOS

                  + +

To get started with PyTorch on iOS, we recommend exploring the HelloWorld example below.

                  + +

                  Quickstart with a Hello World Example

                  + +

                  HelloWorld is a simple image classification application that demonstrates how to use PyTorch C++ libraries on iOS. The code is written in Swift and uses Objective-C as a bridge.

                  + +

                  Requirements

                  + +
                    +
                  • XCode 11.0 or above
                  • +
                  • iOS 12.0 or above
                  • +
                  + +

                  Model Preparation

                  + +

Let’s start with model preparation. If you are familiar with PyTorch, you probably already know how to train and save your model. In case you don’t, we are going to use a pre-trained image classification model - MobileNet v2, which is already packaged in TorchVision. To install it, run the command below.

                  + +
                  +

We highly recommend following the PyTorch GitHub page to set up the Python development environment on your local machine.

                  +
                  + +
                  pip install torchvision
                  +
                  + +

Once TorchVision is installed successfully, let’s navigate to the HelloWorld folder and run trace_model.py. The script contains the code for tracing and saving a TorchScript model that can be run on mobile devices.

                  + +
                  python trace_model.py
                  +
                  + +

                  If everything works well, model.pt should be generated and saved in the HelloWorld/HelloWorld/model folder.

                  + +
                  +

To find out more details about TorchScript, please visit the tutorials on pytorch.org.

                  +
                  + +

                  Install LibTorch-Lite via Cocoapods

                  + +

The PyTorch C++ library is available via CocoaPods; to integrate it into our project, simply run

                  + +
                  pod install
                  +
                  + +

                  Now it’s time to open the HelloWorld.xcworkspace in XCode, select an iOS simulator and launch it (cmd + R). If everything works well, we should see a wolf picture on the simulator screen along with the prediction result.

                  + +

                  + +

                  Code Walkthrough

                  + +

                  In this part, we are going to walk through the code step by step.

                  + +

                  Image Loading

                  + +

                  Let’s begin with image loading.

                  + +
                  let image = UIImage(named: "image.jpg")!
                  +imageView.image = image
                  +let resizedImage = image.resized(to: CGSize(width: 224, height: 224))
                  +guard var pixelBuffer = resizedImage.normalized() else {
                  +    return
                  +}
                  +
                  + +

                  We first load the image from our bundle and resize it to 224x224. Then we call this normalized() category method to normalize the pixel buffer. Let’s take a closer look at the code below.

                  + +
                  var normalizedBuffer: [Float32] = [Float32](repeating: 0, count: w * h * 3)
                  +// normalize the pixel buffer
                  +// see https://pytorch.org/hub/pytorch_vision_resnet/ for more detail
                  +for i in 0 ..< w * h {
                  +    normalizedBuffer[i]             = (Float32(rawBytes[i * 4 + 0]) / 255.0 - 0.485) / 0.229 // R
                  +    normalizedBuffer[w * h + i]     = (Float32(rawBytes[i * 4 + 1]) / 255.0 - 0.456) / 0.224 // G
                  +    normalizedBuffer[w * h * 2 + i] = (Float32(rawBytes[i * 4 + 2]) / 255.0 - 0.406) / 0.225 // B
                  +}
                  +
                  + +

The code might look weird at first glance, but it will make sense once we understand our model. The input data is a 3-channel RGB image of shape (3 x H x W), where H and W are expected to be at least 224. The image has to be loaded into a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].

                  + +
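For comparison, the same preprocessing expressed in Python with torchvision transforms (a sketch of the standard ImageNet pipeline, not code taken from the demo app):

from PIL import Image
from torchvision import transforms

# Resize to 224x224, scale pixels to [0, 1], then normalize with the
# ImageNet mean/std used above; the result is a (3, 224, 224) float tensor.
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])
input_tensor = preprocess(Image.open("image.jpg"))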

                  TorchScript Module

                  + +

                  Now that we have preprocessed our input data and we have a pre-trained TorchScript model, the next step is to use them to run prediction. To do that, we’ll first load our model into the application.

                  + +
                  private lazy var module: TorchModule = {
                  +    if let filePath = Bundle.main.path(forResource: "model", ofType: "pt"),
                  +        let module = TorchModule(fileAtPath: filePath) {
                  +        return module
                  +    } else {
                  +        fatalError("Can't find the model file!")
                  +    }
                  +}()
                  +
                  +

                  Note that the TorchModule Class is an Objective-C wrapper of torch::jit::mobile::Module.

                  + +
                  torch::jit::mobile::Module module = torch::jit::_load_for_mobile(filePath.UTF8String);
                  +
                  +

Since Swift cannot talk to C++ directly, we have to either use an Objective-C class as a bridge, or create a C wrapper for the C++ library. For demo purposes, we are going to wrap everything in this Objective-C class.

                  + +

                  Run Inference

                  + +

                  Now it’s time to run inference and get the results.

                  + +
                  guard let outputs = module.predict(image: UnsafeMutableRawPointer(&pixelBuffer)) else {
                  +    return
                  +}
                  +
                  +

                  Again, the predict method is just an Objective-C wrapper. Under the hood, it calls the C++ forward function. Let’s take a look at how it’s implemented.

                  + +
                  at::Tensor tensor = torch::from_blob(imageBuffer, {1, 3, 224, 224}, at::kFloat);
                  +c10::InferenceMode guard;
                  +auto outputTensor = _impl.forward({tensor}).toTensor();
                  +float* floatBuffer = outputTensor.data_ptr<float>();
                  +
                  +

                  The C++ function torch::from_blob will create an input tensor from the pixel buffer. Note that the shape of the tensor is {1,3,224,224} which represents {N, C, H, W} as we discussed in the above section.

                  + +
                  c10::InferenceMode guard;
                  +
                  +

                  The above line tells PyTorch to do inference only.

                  + +

                  Finally, we can call this forward function to get the output tensor and convert it to a float buffer.

                  + +
                  auto outputTensor = _impl.forward({tensor}).toTensor();
                  +float* floatBuffer = outputTensor.data_ptr<float>();
                  +
                  + +

                  Collect Results

                  + +

                  The output tensor is a one-dimensional float array of shape 1x1000, where each value represents the confidence that a label is predicted from the image. The code below sorts the array and retrieves the top three results.

                  + +
                  let zippedResults = zip(labels.indices, outputs)
                  +let sortedResults = zippedResults.sorted { $0.1.floatValue > $1.1.floatValue }.prefix(3)
                  +
                  + +
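For reference, the equivalent post-processing in Python (with hypothetical stand-ins for the model output and the label list):

import torch

# Stand-ins: outputs would come from the model, labels from the bundled label file.
outputs = torch.randn(1, 1000)
labels = [f"class_{i}" for i in range(1000)]

top_scores, top_indices = torch.topk(outputs.squeeze(0), k=3)
for score, idx in zip(top_scores.tolist(), top_indices.tolist()):
    print(labels[idx], score)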

                  PyTorch Demo App

                  + +

For more complex use cases, we recommend checking out the PyTorch demo application. The demo app contains two showcases: a camera app that runs a quantized model to classify images coming from the device’s rear-facing camera in real time, and a text-based app that uses a text classification model to predict the topic of the input string.

                  + +

                  More PyTorch iOS Demo Apps

                  + +

                  Image Segmentation

                  + +

Image Segmentation demonstrates a Python script that converts the PyTorch DeepLabV3 model for mobile use, and an iOS app that uses the model to segment images.

                  + +

                  Object Detection

                  + +

Object Detection demonstrates how to convert the popular YOLOv5 model and use it in an iOS app that detects objects in pictures from your photo library, photos taken with the camera, or the live camera feed.

                  + +

                  Neural Machine Translation

                  + +

                  Neural Machine Translation demonstrates how to convert a sequence-to-sequence neural machine translation model trained with the code in the PyTorch NMT tutorial and use the model in an iOS app to do French-English translation.

                  + +

                  Question Answering

                  + +

                  Question Answering demonstrates how to convert a powerful transformer QA model and use the model in an iOS app to answer questions about PyTorch Mobile and more.

                  + +

                  Vision Transformer

                  + +

Vision Transformer demonstrates how to use Facebook’s latest Vision Transformer DeiT model to do image classification, and how to convert another Vision Transformer model and use it in an iOS app to perform handwritten digit recognition.

                  + +

                  Speech recognition

                  + +

                  Speech Recognition demonstrates how to convert Facebook AI’s wav2vec 2.0, one of the leading models in speech recognition, to TorchScript and how to use the scripted model in an iOS app to perform speech recognition.

                  + +

                  Video Classification

                  + +

TorchVideo demonstrates how to use a pre-trained video classification model from the newly released PyTorchVideo on iOS, showing classification results that update every second as the video plays, for bundled test videos, videos from the Photos library, or even a real-time video stream.

                  + +

                  PyTorch iOS Tutorial and Recipes

                  + +

                  Image Segmentation DeepLabV3 on iOS

                  + +

                  A comprehensive step-by-step tutorial on how to prepare and run the PyTorch DeepLabV3 image segmentation model on iOS.

                  + +

                  PyTorch Mobile Performance Recipes

                  + +

                  List of recipes for performance optimizations for using PyTorch on Mobile.

                  + +

                  Fuse Modules recipe

                  + +

                  Learn how to fuse a list of PyTorch modules into a single module to reduce the model size before quantization.

                  + +

                  Quantization for Mobile Recipe

                  + +

                  Learn how to reduce the model size and make it run faster without losing much on accuracy.

                  + +

                  Script and Optimize for Mobile

                  + +

Learn how to convert the model to TorchScript and (optionally) optimize it for mobile apps.

                  + +

                  Model Preparation for iOS Recipe

                  + +

                  Learn how to add the model in an iOS project and use PyTorch pod for iOS.

                  + +

                  Build PyTorch iOS Libraries from Source

                  + +

                  To track the latest updates for iOS, you can build the PyTorch iOS libraries from the source code.

                  + +
                  git clone --recursive https://github.com/pytorch/pytorch
                  +cd pytorch
                  +# if you are updating an existing checkout
                  +git submodule sync
                  +git submodule update --init --recursive
                  +
                  + +
                  +

Make sure you have CMake and Python installed correctly on your local machine. We recommend following the PyTorch GitHub page to set up the Python development environment.

                  +
                  + +

                  Build LibTorch-Lite for iOS Simulators

                  + +

Open a terminal and navigate to the PyTorch root directory. Run the following command (if you have already built LibTorch-Lite for iOS devices (see below), run rm -rf build_ios first):

                  + +
                  BUILD_PYTORCH_MOBILE=1 IOS_PLATFORM=SIMULATOR ./scripts/build_ios.sh
                  +
                  +

After the build succeeds, all static libraries and header files will be generated under build_ios/install.

                  + +

                  Build LibTorch-Lite for arm64 Devices

                  + +

Open a terminal and navigate to the PyTorch root directory. Run the following command (if you have already built LibTorch-Lite for iOS simulators, run rm -rf build_ios first):

                  + +
                  BUILD_PYTORCH_MOBILE=1 IOS_ARCH=arm64 ./scripts/build_ios.sh
                  +
                  +

After the build succeeds, all static libraries and header files will be generated under build_ios/install.

                  + +

                  XCode Setup

                  + +

Open your project in XCode, go to your project Target’s Build Phases - Link Binaries With Libraries, click the + sign and add all the library files located in build_ios/install/lib. Then navigate to the project Build Settings, set Header Search Paths to build_ios/install/include and Library Search Paths to build_ios/install/lib.

                  + +

In the build settings, search for Other Linker Flags and add the custom linker flag below:

                  + +
                  -all_load
                  +
                  + +

To use the custom-built libraries in the project, replace #import <LibTorch-Lite/LibTorch-Lite.h> (in TorchModule.mm), which is needed when using LibTorch-Lite via CocoaPods, with the code below:

                  +
                  #include <torch/csrc/jit/mobile/import.h>
                  +#include <torch/csrc/jit/mobile/module.h>
                  +#include <torch/script.h>
                  +
                  + +

Finally, disable bitcode for your target by going to Build Settings, searching for Enable Bitcode, and setting the value to No.

                  + +

                  Using the Nightly PyTorch iOS Libraries in CocoaPods

                  +

If you want to try out the latest features added to PyTorch iOS, you can use the LibTorch-Lite-Nightly pod in your Podfile; it includes the nightly-built libraries:

                  +
                  pod 'LibTorch-Lite-Nightly'
                  +
                  +

Then run pod install to add it to your project. If you wish to update the nightly pod to a newer version, run pod update to get the latest one. But be aware that you may need to rebuild the model used on mobile with the latest PyTorch - using either the latest PyTorch code or a quick nightly install such as pip install --pre torch torchvision -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html - to avoid possible model version mismatch errors when running the model on mobile.

                  + +

                  Custom Build

                  + +

Starting from 1.4.0, PyTorch supports custom builds. You can now build a PyTorch library that contains only the operators needed by your model. To do that, follow the steps below.

                  + +

                  1. Verify your PyTorch version is 1.4.0 or above. You can do that by checking the value of torch.__version__.

                  + +

                  2. To dump the operators in your model, say MobileNetV2, run the following lines of Python code:

                  + +
                  import torch, yaml
                  +model = torch.jit.load('MobileNetV2.pt')
                  +ops = torch.jit.export_opnames(model)
                  +with open('MobileNetV2.yaml', 'w') as output:
                  +    yaml.dump(ops, output)
                  +
                  +

                  In the snippet above, you first need to load the ScriptModule. Then, use export_opnames to return a list of operator names of the ScriptModule and its submodules. Lastly, save the result in a yaml file.

                  + +

3. To run the iOS build script locally with the prepared YAML list of operators, pass the YAML file generated in the last step into the environment variable SELECTED_OP_LIST. Also, in the arguments, specify BUILD_PYTORCH_MOBILE=1 as well as the platform/architecture type. Taking the arm64 build as an example, the command should be:

                  + +
                  SELECTED_OP_LIST=MobileNetV2.yaml BUILD_PYTORCH_MOBILE=1 IOS_ARCH=arm64 ./scripts/build_ios.sh
                  +
                  +

4. After the build succeeds, you can integrate the resulting libraries into your project by following the XCode Setup section above.

                  + +

5. The last step is to add a single line of C++ code before running forward. This is because, by default, JIT will do some optimizations on operators (fusion, for example), which might break consistency with the operators we dumped from the model.

                  + +
                  torch::jit::GraphOptimizerEnabledGuard guard(false);
                  +
                  + +

                  Use PyTorch JIT interpreter

                  +

The PyTorch JIT interpreter is the default interpreter before 1.9 (a version of our PyTorch interpreter that is not as size-efficient). It is still supported in 1.9, and can be used via CocoaPods:

                  +
                  pod 'LibTorch', '~>1.9.0'
                  +
                  + +

                  iOS Tutorials

                  + +

                  Watch the following video as PyTorch Partner Engineer Brad Heintz walks through steps for setting up the PyTorch Runtime for iOS projects:

                  + +

                  PyTorch Mobile Runtime for iOS

                  + +

                  The corresponding code can be found here.

                  + +

Additionally, check out our Mobile Performance Recipes, which cover how to optimize your model and check, via benchmarking, whether the optimizations helped.

                  + +

                  API Docs

                  + +

Currently, the iOS framework uses the PyTorch C++ front-end APIs directly. The C++ documentation can be found here. To learn more, we recommend exploring the C++ front-end tutorials on the PyTorch website.

                  + +

                  Issues and Contribution

                  + +

If you have any questions or want to contribute to PyTorch, please feel free to file issues or open a pull request to get in touch.

                  + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/news/news-item-1.html b/news/news-item-1.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/news/news-item-1.html @@ -0,0 +1 @@ + diff --git a/news/news-item-2.html b/news/news-item-2.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/news/news-item-2.html @@ -0,0 +1 @@ + diff --git a/news/news-item-3.html b/news/news-item-3.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/news/news-item-3.html @@ -0,0 +1 @@ + diff --git a/past_issues/2021-03-11-issue-1.html b/past_issues/2021-03-11-issue-1.html new file mode 100644 index 000000000000..1f31867d20a4 --- /dev/null +++ b/past_issues/2021-03-11-issue-1.html @@ -0,0 +1,40 @@ +

                  Issue #1

                  + +

                  Welcome to the first issue of the PyTorch Contributors newsletter! Keeping track of everything that’s happening in the PyTorch developer world is a big task; here you will find curated news including RFCs, feature roadmaps, notable PRs, editorials from developers, and more. If you have questions or suggestions for the newsletter, just reply back to this email.

                  + +

                  PyTorch 1.8.0

                  + +

                  PyTorch 1.8 was released on March 4th with support for functional transformations using torch.fx, stabilized frontend APIs for scientific computing (torch.fft, torch.linalg, Autograd for complex tensors) and significant improvements to distributed training. Read the full Release Notes.

                  + +

                  PyTorch Ecosystem Day

                  + +

                  On April 21, we’re hosting a virtual event for our ecosystem and industry communities to showcase their work and discover new opportunities to collaborate. The day will be filled with discussion on new developments, trends, challenges and best practices through posters, breakout sessions and networking.

                  + +

                  The PyTorch open source process

                  + +

                  @ezyang describes the challenges of maintaining a PyTorch-scale project, and the current open source processes (triaging and CI oncalls, RFC discussions) to help PyTorch operate effectively.

                  + +

                  Developers forum

                  + +

We launched https://dev-discuss.pytorch.org/, a low-traffic, high-signal forum for long-form discussions about PyTorch internals.

                  + +

                  [RFC] Dataloader v2

                  + +

                  @VitalyFedyunin proposes redesigning the DataLoader to support lazy loading, sharding, pipelining data operations (including async) and shuffling & sampling in a more modular way. Join the discussion here.

                  + +

                  [RFC] Improving TorchScript Usability

                  + +

                  In a series of 3 blog posts (1, 2, 3) @t-vi explores ideas to improve the user and developer experience of TorchScript.

                  + +

                  [RFC] CSR and DM storage formats for sparse tensors

                  + +

                  @pearu proposes an RFC to make linear algebra operations more performant by

                  + +
                    +
• implementing the CSR storage format, where a 2D array is defined by its shape and 1D tensors for compressed row indices, column indices, and values (PyTorch 1D tensors) - see the small illustration after this list
                  • +
                  • introducing the Dimension Mapping storage format that generalizes a 2D CSR to multidimensional arrays using a bijective mapping between the storage and wrapper elements.
                  • +
                  + +
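To make the CSR layout concrete, here is a small, generic illustration (not the exact PyTorch implementation proposed in the RFC) for a 3x3 matrix with non-zeros 10 and 20 in row 0 and 30 in row 2:

import torch

# Dense matrix being represented:
# [[10,  0, 20],
#  [ 0,  0,  0],
#  [ 0, 30,  0]]
crow_indices = torch.tensor([0, 2, 2, 3])  # row i owns values[crow[i]:crow[i+1]]
col_indices = torch.tensor([0, 2, 1])      # column of each stored value
values = torch.tensor([10, 20, 30])        # the non-zero entries
shape = (3, 3)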

                  [RFC] Forward Mode AD

                  + +

                  @albanD proposes an RFC to implement forward mode autodiff using Tensor-based dual numbers, where the real part represents the tensor and the dual part stores the forward gradient of the tensor. The core of the feature has landed (PR), with more formulas in WIP. Complete forward mode AD is expected to land by July 2021.

                  diff --git a/past_issues/2021-05-11-issue-2.html b/past_issues/2021-05-11-issue-2.html new file mode 100644 index 000000000000..433d70e028a6 --- /dev/null +++ b/past_issues/2021-05-11-issue-2.html @@ -0,0 +1,38 @@ +

                  Issue #2

                  + +

                  Welcome to the second edition of the PyTorch newsletter! In this issue, read about how we celebrated the PyTorch community at the first-ever PyTorch Ecosystem Day (PTED), discover a new podcast for PyTorch developers, and learn about important updates to the PyTorch frontend.

                  + +

                  PyTorch Ecosystem Day

                  + +

                  Piotr Bialecki (Sr. Software Engineer, NVIDIA) spoke about his journey of using PyTorch and what he sees in the future for PyTorch. Miquel Farré (Sr. Technology Manager, Disney) spoke about the Creative Genome project that uses the PyTorch ecosystem to annotate all Disney content. Ritchie Ng (CEO, Hessian Matrix) spoke about the growth of AI in the Asia Pacific region, and how to get started with PyTorch for production AI use cases. Members of the community showcased how they were using PyTorch via 71 posters and pop-up breakout sessions. See all of the posters and listen to the opening keynote talks here!

                  + +

                  PyTorch Developer Podcast

                  + +

                  Edward Yang (Research Engineer, Facebook AI) talks about internal development concepts like binding C++ in Python, the dispatcher, PyTorch’s library structure and more. Check out this new series; each episode is around 15 minutes long. Listen to it wherever you get your podcasts.

                  + +

                  Forward Mode AD

                  +

                  The core logic for Forward Mode AD (based on “dual tensors”) is now in PyTorch. All the APIs to manipulate such Tensors, codegen and view handling are in master (1.9.0a0) already. Gradcheck and a first set of formulas will be added in the following month; full support for all PyTorch functions, custom Autograd functions and higher order gradients will happen later this year. Read more about this or share your feedback with @albanD on the corresponding RFC.

                  + +
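For the curious, the dual-number API in current builds looks roughly like this (a sketch; the interface may still change as the feature matures):

import torch
import torch.autograd.forward_ad as fwAD

primal = torch.randn(3)
tangent = torch.randn(3)

with fwAD.dual_level():
    dual = fwAD.make_dual(primal, tangent)   # real part + forward gradient
    out = torch.sin(dual)
    value, jvp = fwAD.unpack_dual(out)       # jvp == cos(primal) * tangent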

                  Make complex conjugation lazy

                  + +

                  PR #54987 makes the conjugate operation on complex tensors return a view that has a special is_conj() bit flipped. Aside from saving memory by not creating a full tensor, this grants a potential speedup if the following operation can handle conjugated inputs directly. For such operations (like gemm), a flag is passed to the low-level API; for others the conjugate is materialized before passing to the operation.

                  + +
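A small example of the resulting behavior (availability of the view-based conjugate depends on your PyTorch build):

import torch

t = torch.tensor([1 + 2j, 3 - 4j])
c = t.conj()           # a view of t; no full conjugated copy is created
print(c.is_conj())     # True: only the conjugate bit is set on the view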

                  torch.use_deterministic_algorithms is stable

                  + +

                  torch.use_deterministic_algorithms() (docs) is stable in master (1.9.0a0). If True, the flag switches non-deterministic operations to their deterministic implementation if available, and throws a RuntimeError if not.

                  + +
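Usage is a single call at the start of your program:

import torch

# Prefer deterministic implementations; ops without one raise RuntimeError.
torch.use_deterministic_algorithms(True)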

                  torch.linalg and torch.special

                  + +

torch.linalg is now stable; the module maintains fidelity with NumPy’s np.linalg linear algebra functions. torch.special (beta) contains functions from scipy.special. Here’s the tracking issue if you’d like to contribute functions to torch.special. If you want a function that is not already on the list, let us know on the tracking issue about your use case and why it should be added.

                  + +
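For example (both modules mirror their NumPy/SciPy counterparts):

import torch

A = torch.randn(3, 3)
print(torch.linalg.norm(A))                            # like numpy.linalg.norm
print(torch.special.expit(torch.tensor([0.0, 1.0])))   # like scipy.special.expit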

                  Generalizing AMP to work on CPU

                  + +
                  +

@ezyang: Intel is interested in bringing automatic mixed precision to CPU in [RFC] Extend Autocast to CPU/CUDA with BF16 data type (pytorch/pytorch issue #55374). One big question is what the autocasting API should be for CPU: should we provide a single, generalized torch.autocast API (keeping in mind that CPU autocasting would use bfloat16, while the existing GPU autocasting uses float16), or provide separate APIs for CPU and CUDA? If you have any thoughts or opinions on the subject, please chime in on the issue.

                  +
                  + +
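For context, a sketch of what a single generalized API could look like (illustrative only; the design was still being discussed at the time of this issue):

import torch

model = torch.nn.Linear(8, 8)
x = torch.randn(4, 8)

# CPU autocasting would use bfloat16, unlike float16 on CUDA.
with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
    y = model(x)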


                  +

                  + +

                  Are you enjoying reading this newsletter? What would you like to know more about? All feedback is welcome and appreciated! To share your suggestions, use this form or simply reply to this email.

                  diff --git a/preview_setup.sh b/preview_setup.sh new file mode 100755 index 000000000000..14052d9e5d47 --- /dev/null +++ b/preview_setup.sh @@ -0,0 +1,6 @@ +#!/bin/bash +rm -rf pytorch.github.io +git clone --recursive https://github.com/pytorch/pytorch.github.io.git -b site --depth 1 +cp *.md pytorch.github.io/_hub +cp images/* pytorch.github.io/assets/images/ + diff --git a/previous-versions.html b/previous-versions.html new file mode 100644 index 000000000000..4aab09c5eec4 --- /dev/null +++ b/previous-versions.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

                  Redirecting…

                  + Click here if you are not redirected. + diff --git a/redirects.json b/redirects.json new file mode 100644 index 000000000000..0972608ce821 --- /dev/null +++ b/redirects.json @@ -0,0 +1 @@ +{"/get-started/":"https://pytorch.org/get-started/locally/","/previous-versions.html":"https://pytorch.org/get-started/previous-versions/","/ecosystem/Captum/":"https://captum.ai/","/ecosystem/Flair/":"https://github.com/flairNLP/flair","/ecosystem/Ignite/":"https://github.com/pytorch/ignite","/ecosystem/advertorch/":"https://github.com/BorealisAI/advertorch","/ecosystem/allennlp/":"https://allennlp.org/","/ecosystem/botorch/":"https://botorch.org/","/ecosystem/crypten/":"https://github.com/facebookresearch/CrypTen","/ecosystem/fastai/":"https://docs.fast.ai","/ecosystem/glow/":"https://github.com/pytorch/glow","/ecosystem/gpytorch/":"https://cornellius-gp.github.io/","/ecosystem/horovod/":"http://horovod.ai","/ecosystem/parlai/":"http://parl.ai/","/ecosystem/pennylane/":"https://pennylane.ai/","/ecosystem/pyro/":"http://pyro.ai/","/ecosystem/pysyft/":"https://github.com/OpenMined/PySyft","/ecosystem/pytorch-geometric/":"https://github.com/pyg-team/pytorch_geometric/","/ecosystem/pytorch-lightning/":"https://github.com/williamFalcon/pytorch-lightning","/ecosystem/roma/":"https://github.com/naver/roma","/ecosystem/skorch/":"https://github.com/skorch-dev/skorch","/ecosystem/tensorly/":"http://tensorly.org/stable/home.html","/mobile/":"https://pytorch.org/mobile/home/","/resources/contributors/":"https://pytorch.org/newsletter","/ecosystem/":"https://landscape.pytorch.org/","/ecosystem/join.html":"https://github.com/pytorch-fdn/ecosystem"} \ No newline at end of file diff --git a/resources/contributors/index.html b/resources/contributors/index.html new file mode 100644 index 000000000000..cc6c293ce476 --- /dev/null +++ b/resources/contributors/index.html @@ -0,0 +1,11 @@ + + + + Redirecting… + + + + +

                  Redirecting…

                  + Click here if you are not redirected. + diff --git a/scripts/add_noindex_tags.sh b/scripts/add_noindex_tags.sh deleted file mode 100755 index 73b5a6e85636..000000000000 --- a/scripts/add_noindex_tags.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -# Adds tags to all html files in a -# directory (recursively) -# -# Usage: -# ./add_noindex_tags.sh directory -# -# Example (from the root directory) -# ./scripts/add_no_index_tags.sh docs/1.6.0 -if [ "$1" == "" ]; then - echo "Incorrect usage. Correct Usage: add_no_index_tags.sh " - exit 1 -fi -find $1 -name "*.html" -print0 | xargs -0 sed -i '//a \ \ ' diff --git a/scripts/deploy-site.sh b/scripts/deploy-site.sh deleted file mode 100755 index 43258031d5af..000000000000 --- a/scripts/deploy-site.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/bash -# ideas used from https://gist.github.com/motemen/8595451 - -# Based on https://github.com/eldarlabs/ghpages-deploy-script/blob/master/scripts/deploy-ghpages.sh -# Used with their MIT license https://github.com/eldarlabs/ghpages-deploy-script/blob/master/LICENSE - -# abort the script if there is a non-zero error -set -ex - -# initialize hub submodule -git submodule deinit -f . && git submodule update --init --recursive - -# use latest hub -./_devel/update_hub_submodule.sh - -# Files not related to build should be deleted. -pushd _hub -rm -R `ls -1 -d */` -rm -f README.md -popd - -# show where we are on the machine -pwd -remote=$(git config remote.origin.url) - -# make a directory to put the master branch -mkdir master-branch -cd master-branch -# now lets setup a new repo so we can update the master branch -git init -git remote add origin "$remote" -git fetch --depth 1 - -# switch into the the master branch -if git rev-parse --verify origin/master > /dev/null 2>&1 -then - git checkout master - # delete any old site as we are going to replace it - # Note: this explodes if there aren't any, so moving it here for now - git rm -rf . -else - git checkout --orphan master -fi - -cd "../" -make build_deploy -cd master-branch - -# copy over or recompile the new site -cp -a "../_site/." . - -# have small jekyll config to allow underscores -echo "include: [_static, _images, _modules, _sources, _asserts.html, _creation.html, _comparison.html, _lowrank.html, _script.html, _diagnostic.html, _dynamo.html, _serialization.html, _type_utils, _tensor_str.html, _trace.html, _utils.html, _internal, _C, _distributed_autograd.html, _distributed_c10d.html, _distributed_rpc.html, _fft.html, _linalg.html, _monitor.html, _nested.html, _nn.html, _profiler.html, _sparse.html, _special.html, __config__.html, _dynamo, _lobpcg.html, _jit_internal.html, _numeric_suite.html, _numeric_suite_fx.html, _sanitizer.html, _symbolic_trace.html, _async.html, _freeze.html, _fuser.html, _type_utils.html, _utils ]" > _config.yml - -# stage any changes and new files -git add -A -# now commit, ignoring branch master doesn't seem to work, so trying skip -git commit --allow-empty -m "Deploy to GitHub Pages on master [ci skip]" -# and push, but send any output to /dev/null to hide anything sensitive -git push --force --quiet https://pytorchbot:$SECRET_PYTORCHBOT_TOKEN@github.com/pytorch/pytorch.github.io.git master -# go back to where we started and remove the master git repo we made and used -# for deployment -cd .. -rm -rf master-branch - -echo "Finished Deployment!" 
diff --git a/scripts/gen_quick_start_module.py b/scripts/gen_quick_start_module.py deleted file mode 100755 index 5fd20d79949f..000000000000 --- a/scripts/gen_quick_start_module.py +++ /dev/null @@ -1,271 +0,0 @@ -#!/usr/bin/env python3 -""" -Generates quick start module for https://pytorch.org/get-started/locally/ page -If called from update-quick-start-module.yml workflow (--autogenerate parameter set) -Will output new quick-start-module.js, and new published_version.json file -based on the current release matrix. -If called standalone will generate quick-start-module.js from existing -published_version.json file -""" - -import argparse -import copy -import json -from enum import Enum -from pathlib import Path -from typing import Dict - -BASE_DIR = Path(__file__).parent.parent - - -class OperatingSystem(Enum): - LINUX: str = "linux" - WINDOWS: str = "windows" - MACOS: str = "macos" - - -PRE_CXX11_ABI = "pre-cxx11" -CXX11_ABI = "cxx11-abi" -DEBUG = "debug" -RELEASE = "release" -DEFAULT = "default" -ENABLE = "enable" -DISABLE = "disable" -MACOS = "macos" - -# Mapping json to release matrix default values -acc_arch_ver_default = { - "nightly": { - "accnone": ("cpu", ""), - "cuda.x": ("cuda", "11.8"), - "cuda.y": ("cuda", "12.1"), - "cuda.z": ("cuda", "12.4"), - "rocm5.x": ("rocm", "6.0"), - }, - "release": { - "accnone": ("cpu", ""), - "cuda.x": ("cuda", "11.8"), - "cuda.y": ("cuda", "12.1"), - "cuda.z": ("cuda", "12.4"), - "rocm5.x": ("rocm", "6.0"), - }, -} - -# Initialize arch version to default values -# these default values will be overwritten by -# extracted values from the release marix -acc_arch_ver_map = acc_arch_ver_default - -LIBTORCH_DWNL_INSTR = { - PRE_CXX11_ABI: "Download here (Pre-cxx11 ABI):", - CXX11_ABI: "Download here (cxx11 ABI):", - RELEASE: "Download here (Release version):", - DEBUG: "Download here (Debug version):", - MACOS: "Download arm64 libtorch here (ROCm and CUDA are not supported):", -} - - -def load_json_from_basedir(filename: str): - try: - with open(BASE_DIR / filename) as fptr: - return json.load(fptr) - except FileNotFoundError as exc: - raise ImportError(f"File {filename} not found error: {exc.strerror}") from exc - except json.JSONDecodeError as exc: - raise ImportError(f"Invalid JSON {filename}") from exc - - -def read_published_versions(): - return load_json_from_basedir("published_versions.json") - - -def write_published_versions(versions): - with open(BASE_DIR / "published_versions.json", "w") as outfile: - json.dump(versions, outfile, indent=2) - -# Create releases JSON for PyTorch website. -# Matrix is being used to populate config data for -# the "Start Locally" installation options table. 
-def write_releases_file(matrix): - with open(BASE_DIR / "releases.json", "w") as outfile: - json.dump(matrix, outfile, indent=2) - -def read_matrix_for_os(osys: OperatingSystem, channel: str): - jsonfile = load_json_from_basedir(f"{osys.value}_{channel}_matrix.json") - return jsonfile["include"] - - -def read_quick_start_module_template(): - with open(BASE_DIR / "_includes" / "quick-start-module.js") as fptr: - return fptr.read() - - -def get_package_type(pkg_key: str, os_key: OperatingSystem) -> str: - if pkg_key != "pip": - return pkg_key - return "manywheel" if os_key == OperatingSystem.LINUX.value else "wheel" - - -def get_gpu_info(acc_key, instr, acc_arch_map): - gpu_arch_type, gpu_arch_version = acc_arch_map[acc_key] - if DEFAULT in instr: - gpu_arch_type, gpu_arch_version = acc_arch_map["accnone"] - return (gpu_arch_type, gpu_arch_version) - - -# This method is used for generating new published_versions.json file -# It will modify versions json object with installation instructions -# Provided by generate install matrix Github Workflow, stored in release_matrix -# json object. -def update_versions(versions, release_matrix, release_version): - version = "preview" - template = "preview" - acc_arch_map = acc_arch_ver_map[release_version] - - if release_version != "nightly": - version = release_matrix[OperatingSystem.LINUX.value][0]["stable_version"] - if version not in versions["versions"]: - versions["versions"][version] = copy.deepcopy( - versions["versions"][template] - ) - versions["latest_stable"] = version - - # Perform update of the json file from release matrix - for os_key, os_vers in versions["versions"][version].items(): - for pkg_key, pkg_vers in os_vers.items(): - for acc_key, instr in pkg_vers.items(): - package_type = get_package_type(pkg_key, os_key) - gpu_arch_type, gpu_arch_version = get_gpu_info( - acc_key, instr, acc_arch_map - ) - - pkg_arch_matrix = [ - x - for x in release_matrix[os_key] - if (x["package_type"], x["gpu_arch_type"], x["gpu_arch_version"]) - == (package_type, gpu_arch_type, gpu_arch_version) - ] - - if pkg_arch_matrix: - if package_type != "libtorch": - instr["command"] = pkg_arch_matrix[0]["installation"] - else: - if os_key == OperatingSystem.LINUX.value: - rel_entry_dict = { - x["devtoolset"]: x["installation"] - for x in pkg_arch_matrix - if x["libtorch_variant"] == "shared-with-deps" - } - if instr["versions"] is not None: - for ver in [CXX11_ABI, PRE_CXX11_ABI]: - # temporarily remove setting pre-cxx11-abi. For Release 2.7 we - # should remove pre-cxx11-abi completely. 
- if ver == PRE_CXX11_ABI: - continue - else: - instr["versions"][LIBTORCH_DWNL_INSTR[ver]] = ( - rel_entry_dict[ver] - ) - - elif os_key == OperatingSystem.WINDOWS.value: - rel_entry_dict = { - x["libtorch_config"]: x["installation"] - for x in pkg_arch_matrix - } - if instr["versions"] is not None: - for ver in [RELEASE, DEBUG]: - instr["versions"][LIBTORCH_DWNL_INSTR[ver]] = ( - rel_entry_dict[ver] - ) - elif os_key == OperatingSystem.MACOS.value: - if instr["versions"] is not None: - instr["versions"][LIBTORCH_DWNL_INSTR[MACOS]] = ( - pkg_arch_matrix[0]["installation"] - ) - - -# This method is used for generating new quick-start-module.js -# from the versions json object -def gen_install_matrix(versions) -> Dict[str, str]: - result = {} - version_map = { - "preview": "preview", - "stable": versions["latest_stable"], - } - for ver, ver_key in version_map.items(): - for os_key, os_vers in versions["versions"][ver_key].items(): - for pkg_key, pkg_vers in os_vers.items(): - for acc_key, instr in pkg_vers.items(): - extra_key = "python" if pkg_key != "libtorch" else "cplusplus" - key = f"{ver},{pkg_key},{os_key},{acc_key},{extra_key}" - note = instr["note"] - lines = [note] if note is not None else [] - if pkg_key == "libtorch": - ivers = instr["versions"] - if ivers is not None: - lines += [ - f"{lab}
                  {val}" - for (lab, val) in ivers.items() - ] - else: - command = instr["command"] - if command is not None: - lines.append(command) - result[key] = "
                  ".join(lines) - return result - - -# This method is used for extracting two latest verisons of cuda and -# last verion of rocm. It will modify the acc_arch_ver_map object used -# to update getting started page. -def extract_arch_ver_map(release_matrix): - def gen_ver_list(chan, gpu_arch_type): - return { - x["desired_cuda"]: x["gpu_arch_version"] - for x in release_matrix[chan]["linux"] - if x["gpu_arch_type"] == gpu_arch_type - } - - for chan in ("nightly", "release"): - cuda_ver_list = gen_ver_list(chan, "cuda") - rocm_ver_list = gen_ver_list(chan, "rocm") - cuda_list = sorted(cuda_ver_list.values()) - acc_arch_ver_map[chan]["rocm5.x"] = ("rocm", max(rocm_ver_list.values())) - for cuda_ver, label in zip(cuda_list, ["cuda.x", "cuda.y", "cuda.z"]): - acc_arch_ver_map[chan][label] = ("cuda", cuda_ver) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--autogenerate", dest="autogenerate", action="store_true") - parser.set_defaults(autogenerate=True) - - options = parser.parse_args() - versions = read_published_versions() - - if options.autogenerate: - release_matrix = {} - for val in ("nightly", "release"): - release_matrix[val] = {} - for osys in OperatingSystem: - release_matrix[val][osys.value] = read_matrix_for_os(osys, val) - - write_releases_file(release_matrix) - - extract_arch_ver_map(release_matrix) - for val in ("nightly", "release"): - update_versions(versions, release_matrix[val], val) - - write_published_versions(versions) - - template = read_quick_start_module_template() - versions_str = json.dumps(gen_install_matrix(versions)) - template = template.replace("{{ installMatrix }}", versions_str) - template = template.replace( - "{{ VERSION }}", f"\"Stable ({versions['latest_stable']})\"" - ) - print(template.replace("{{ ACC ARCH MAP }}", json.dumps(acc_arch_ver_map))) - - -if __name__ == "__main__": - main() diff --git a/style-guide.html b/style-guide.html index b0fe6c737a69..61244c6b6d6b 100644 --- a/style-guide.html +++ b/style-guide.html @@ -1,10 +1,118 @@ ---- -layout: default -body-class: style-guide -background-class: style-guide ---- + + + + + + + + + + + + + PyTorch + + + -

                  Header 1
                        Article Page

                  Jumbotron text. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua

                  @@ -16,10 +124,339 @@

                  Header 1
                        Article Page

                  - {% for post in site.style_guide %} - {{ post.content }} - {% endfor %} + +

                  Header 2

                  +

                  This is body copy. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea.

                  + +

                  Header 3

                  + +

                  This is body copy. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea.

                  + +

                  Header 4

                  + +

                  This is body copy. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea.

                  + +
                  Header 5
                  + +

                  This is body copy. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea.

                  + +
                  + +

                  This is more body copy with code snippets. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Here is an inline link. Ut enim ad minim veniam, quis nostrud torch.*.FloatTensor ullamco laboris nisi ut aliquip ex ea commodo consequat.

                  + +

                  This is italicized body copy. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat

                  + +

                  This is bolded body copy. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.

                  + +
                  + +

                  This is body copy before an unordered list. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea.

                  + +
                  • Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
                  • Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
                  • Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.

                  This is body copy after an unordered list. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea.

                  + +
                  1. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
                  2. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
                  3. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.

                  This is body copy after an ordered list. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea.

                  + +
                  Definition list
                    Lorem ipsum dolor sit amet, consectetur adipiscing elit
                  Definition list
                    Lorem ipsum dolor sit amet, consectetur adipiscing elit
                  Definition list
                    Lorem ipsum dolor sit amet, consectetur adipiscing elit
                  + +

                  Here's an image

                  + +
                  + +
                  +

                  “This is a blockquote. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat”

                  +
                  + +
                    brew install pytorch # Here is a small code block
                  +  brew install pytorch # Here is a small code block
                  +
                  + +
                  # Here is a large code block with syntax highlighting
                  +
                  +#!/usr/bin/python3
                  +
                  +# Dictionaries map keys to values.
                  +
                  +fred = { 'mike': 456, 'bill': 399, 'sarah': 521 }
                  +
                  +# Subscripts.
                  +try:
                  +    print(fred)
                  +    print(fred['bill'])
                  +    print(fred['nora'])
                  +    print("Won't see this!")
                  +except KeyError as rest:
                  +    print("Lookup failed:", rest)
                  +print()
                  +
                  +# Entries can be added, updated, or deleted.
                  +fred['bill'] = 'Sopwith Camel'
                  +fred['wilma'] = 2233
                  +del fred['mike']
                  +print(fred)
                  +print()
                  +
                  +# Get all the keys.
                  +print(fred.keys())
                  +for k in fred.keys():
                  +    print(k, "=>", fred[k])
                  +print()
                  +
                  +# Test for presence of a key.
                  +for t in [ 'zingo', 'sarah', 'bill', 'wilma' ]:
                  +    print(t,end=' ')
                  +    if t in fred:
                  +        print('=>', fred[t])
                  +    else:
                  +        print('is not present.')
                  +
                  + +

                  Here is a table:

                  Datatype                  | torch.dtype                    | Tensor types
                  --------------------------|--------------------------------|----------------------
                  32-bit floating point     | torch.float32 or torch.float   | torch.*.FloatTensor
                  64-bit floating point     | torch.float64 or torch.double  | torch.*.DoubleTensor
                  16-bit floating point     | torch.float16 or torch.half    | torch.*.HalfTensor
                  8-bit integer (unsigned)  | torch.uint8                    | torch.*.ByteTensor
                  8-bit integer (signed)    | torch.int8                     | torch.*.CharTensor
                  16-bit integer (signed)   | torch.int16 or torch.short     | torch.*.ShortTensor
                  32-bit integer (signed)   | torch.int32 or torch.int       | torch.*.IntTensor
                  64-bit integer (signed)   | torch.int64 or torch.long      | torch.*.LongTensor
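As a quick sanity check of the dtype-to-tensor-type mapping above, both can be inspected from Python. A minimal sketch (not part of the style-guide page itself), assuming a standard PyTorch installation:

    import torch

    t = torch.zeros(3, dtype=torch.float16)
    print(t.dtype)   # torch.float16
    print(t.type())  # torch.HalfTensor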
                  Docs
                  Access comprehensive developer documentation for PyTorch
                  View Docs

                  Tutorials
                  Get in-depth tutorials for beginners and advanced developers
                  View Tutorials

                  Resources
                  Find development resources and get your questions answered
                  View Resources
diff --git a/test_run_python_code.py b/test_run_python_code.py
new file mode 100644
index 000000000000..f44a28569633
--- /dev/null
+++ b/test_run_python_code.py
@@ -0,0 +1,41 @@
+from subprocess import check_output, STDOUT, CalledProcessError
+import sys
+import pytest
+import glob
+
+
+PYTHON_CODE_DIR = "python_code"
+ALL_FILES = glob.glob(PYTHON_CODE_DIR + "/*.py")
+
+
+@pytest.mark.parametrize('file_path', ALL_FILES)
+def test_run_file(file_path):
+    if 'nvidia' in file_path:
+        # FIXME: NVIDIA model checkpoints are on cuda
+        pytest.skip("temporarily disabled")
+    if 'pytorch_fairseq_translation' in file_path:
+        pytest.skip("temporarily disabled")
+    if 'ultralytics_yolov5' in file_path:
+        # FIXME torch.nn.modules.module.ModuleAttributeError: 'autoShape' object has no attribute 'fuse'
+        pytest.skip("temporarily disabled")
+    if 'huggingface_pytorch-transformers' in file_path:
+        # FIXME torch.nn.modules.module.ModuleAttributeError: 'autoShape' object has no attribute 'fuse'
+        pytest.skip("temporarily disabled")
+    if 'pytorch_fairseq_roberta' in file_path:
+        pytest.skip("temporarily disabled")
+
+    # We just run the python files in a separate sub-process. We really want a
+    # subprocess here because otherwise we might run into package version
+    # issues: imagine script A that needs torchvision 0.9 and script B that
+    # needs torchvision 0.10. If script A is run prior to script B in the same
+    # process, script B will still be run with torchvision 0.9 because the only
+    # "import torchvision" statement that counts is the first one, and even
+    # torch.hub sys.path shenanigans can do nothing about this. By creating
+    # subprocesses we're sure that all file executions are fully independent.
+    try:
+        # This is inspired (and heavily simplified) from
+        # https://github.com/cloudpipe/cloudpickle/blob/343da119685f622da2d1658ef7b3e2516a01817f/tests/testutils.py#L177
+        out = check_output([sys.executable, file_path], stderr=STDOUT)
+        print(out.decode())
+    except CalledProcessError as e:
+        raise RuntimeError(f"Script {file_path} errored with output:\n{e.output.decode()}")
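The new test module above can be exercised locally. A minimal sketch of one way to run it, assuming pytest is installed and the hub example scripts live under python_code/ as the module expects (the "vgg" filter is only an illustrative choice, not something from this diff):

    # Run only the hub scripts whose parametrized id matches the -k expression;
    # equivalent to `pytest -k vgg test_run_python_code.py` on the command line.
    import pytest

    raise SystemExit(pytest.main(["-k", "vgg", "test_run_python_code.py"]))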
diff --git a/videos/pt20qa1.html b/videos/pt20qa1.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/pt20qa1.html @@ -0,0 +1 @@ +
diff --git a/videos/pt20qa10.html b/videos/pt20qa10.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/pt20qa10.html @@ -0,0 +1 @@ +
diff --git a/videos/pt20qa11.html b/videos/pt20qa11.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/pt20qa11.html @@ -0,0 +1 @@ +
diff --git a/videos/pt20qa12.html b/videos/pt20qa12.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/pt20qa12.html @@ -0,0 +1 @@ +
diff --git a/videos/pt20qa2.html b/videos/pt20qa2.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/pt20qa2.html @@ -0,0 +1 @@ +
diff --git a/videos/pt20qa3.html b/videos/pt20qa3.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/pt20qa3.html @@ -0,0 +1 @@ +
diff --git a/videos/pt20qa4.html b/videos/pt20qa4.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/pt20qa4.html @@ -0,0 +1 @@ +
diff --git a/videos/pt20qa5.html b/videos/pt20qa5.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/pt20qa5.html @@ -0,0 +1 @@ +
diff --git a/videos/pt20qa6.html b/videos/pt20qa6.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/pt20qa6.html @@ -0,0 +1 @@ +
diff --git a/videos/pt20qa7.html b/videos/pt20qa7.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/pt20qa7.html @@ -0,0 +1 @@ +
diff --git a/videos/pt20qa8.html b/videos/pt20qa8.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/pt20qa8.html @@ -0,0 +1 @@ +
diff --git a/videos/pt20qa9.html b/videos/pt20qa9.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/pt20qa9.html @@ -0,0 +1 @@ +
diff --git a/videos/ptconf1.html b/videos/ptconf1.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/ptconf1.html @@ -0,0 +1 @@ +
diff --git a/videos/ptconf11.html b/videos/ptconf11.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/ptconf11.html @@ -0,0 +1 @@ +
diff --git a/videos/ptconf12.html b/videos/ptconf12.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/ptconf12.html @@ -0,0 +1 @@ +
diff --git a/videos/ptconf13.html b/videos/ptconf13.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/ptconf13.html @@ -0,0 +1 @@ +
diff --git a/videos/ptconf15.html b/videos/ptconf15.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/ptconf15.html @@ -0,0 +1 @@ +
diff --git a/videos/ptconf16.html b/videos/ptconf16.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/ptconf16.html @@ -0,0 +1 @@ +
diff --git a/videos/ptconf2.html b/videos/ptconf2.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/ptconf2.html @@ -0,0 +1 @@ +
diff --git a/videos/ptconf3.html b/videos/ptconf3.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/ptconf3.html @@ -0,0 +1 @@ +
diff --git a/videos/ptconf4.html b/videos/ptconf4.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/ptconf4.html @@ -0,0 +1 @@ +
diff --git a/videos/ptconf5.html b/videos/ptconf5.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/ptconf5.html @@ -0,0 +1 @@ +
diff --git a/videos/ptconf6.html b/videos/ptconf6.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/ptconf6.html @@ -0,0 +1 @@ +
diff --git a/videos/ptconf7.html b/videos/ptconf7.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/ptconf7.html @@ -0,0 +1 @@ +
diff --git a/videos/ptconf8.html b/videos/ptconf8.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/ptconf8.html @@ -0,0 +1 @@ +
diff --git a/videos/vid1.html b/videos/vid1.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/vid1.html @@ -0,0 +1 @@ +
diff --git a/videos/vid10.html b/videos/vid10.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/vid10.html @@ -0,0 +1 @@ +
diff --git a/videos/vid11.html b/videos/vid11.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/vid11.html @@ -0,0 +1 @@ +
diff --git a/videos/vid12.html b/videos/vid12.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/vid12.html @@ -0,0 +1 @@ +
diff --git a/videos/vid13.html b/videos/vid13.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/vid13.html @@ -0,0 +1 @@ +
diff --git a/videos/vid2.html b/videos/vid2.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/vid2.html @@ -0,0 +1 @@ +
diff --git a/videos/vid3.html b/videos/vid3.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/vid3.html @@ -0,0 +1 @@ +
diff --git a/videos/vid4.html b/videos/vid4.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/vid4.html @@ -0,0 +1 @@ +
diff --git a/videos/vid5.html b/videos/vid5.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/vid5.html @@ -0,0 +1 @@ +
diff --git a/videos/vid6.html b/videos/vid6.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/vid6.html @@ -0,0 +1 @@ +
diff --git a/videos/vid7.html b/videos/vid7.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/vid7.html @@ -0,0 +1 @@ +
diff --git a/videos/vid8.html b/videos/vid8.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/vid8.html @@ -0,0 +1 @@ +
diff --git a/videos/vid9.html b/videos/vid9.html new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/videos/vid9.html @@ -0,0 +1 @@ +
diff --git a/yarn.lock b/yarn.lock
index 5af6829962ee..04d705fb6637 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -9,10 +9,12 @@ anchor-js@^4.1.1:
 bootstrap@4.3.1:
   version "4.3.1"
   resolved "https://registry.yarnpkg.com/bootstrap/-/bootstrap-4.3.1.tgz#280ca8f610504d99d7b6b4bfc4b68cec601704ac"
+  integrity sha512-rXqOmH1VilAt2DyPzluTi2blhk17bO7ef+zLLPlWvG494pDxcM234pJ8wTc/6R40UWizAIIMgxjvxZg5kmsbag==

 jquery@^3.5.0:
   version "3.5.0"
   resolved "https://registry.yarnpkg.com/jquery/-/jquery-3.5.0.tgz#9980b97d9e4194611c36530e7dc46a58d7340fc9"
+  integrity sha512-Xb7SVYMvygPxbFMpTFQiHh1J7HClEaThguL15N/Gg37Lri/qKyhRGZYzHRyLH8Stq3Aow0LsHO2O2ci86fCrNQ==

 popper.js@^1.14.3:
   version "1.14.3"